1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
11 #include <linux/module.h>
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/ptp_clock_kernel.h>
53 #include <linux/timecounter.h>
54 #include <linux/cpu_rmap.h>
55 #include <linux/cpumask.h>
56 #include <net/pkt_cls.h>
57 #include <linux/hwmon.h>
58 #include <linux/hwmon-sysfs.h>
59 #include <net/page_pool.h>
63 #include "bnxt_hwrm.h"
65 #include "bnxt_sriov.h"
66 #include "bnxt_ethtool.h"
72 #include "bnxt_devlink.h"
73 #include "bnxt_debugfs.h"
75 #define BNXT_TX_TIMEOUT (5 * HZ)
76 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
79 MODULE_LICENSE("GPL");
80 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
82 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
83 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
84 #define BNXT_RX_COPY_THRESH 256
86 #define BNXT_TX_PUSH_THRESH 164
133 NETXTREME_E_P5_VF_HV,
136 /* indexed by enum above */
137 static const struct {
140 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
141 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
142 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
143 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
144 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
145 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
146 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
147 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
148 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
149 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
150 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
151 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
152 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
153 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
154 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
155 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
156 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
157 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
158 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
159 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
160 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
161 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
162 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
163 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
164 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
165 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
166 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
167 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
168 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
169 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
170 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
171 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
172 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
173 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
174 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
175 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
176 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
177 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
178 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
179 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
180 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
181 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
182 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
183 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
184 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
187 static const struct pci_device_id bnxt_pci_tbl[] = {
188 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
190 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
191 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
193 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
194 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
195 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
197 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
198 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
199 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
200 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
201 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
202 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
203 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
204 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
205 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
206 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
207 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
208 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
209 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
210 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
211 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
212 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
213 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
214 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
215 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
216 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
217 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
218 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
219 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
220 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
221 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
222 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
223 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
224 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
225 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
226 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
227 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
228 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
229 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
230 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
231 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
232 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
233 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
234 #ifdef CONFIG_BNXT_SRIOV
235 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
236 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
237 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
238 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
239 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
240 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
241 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
242 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
243 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
244 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
245 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
246 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
247 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
248 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
249 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
250 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
251 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
252 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
253 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
254 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
255 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
260 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
262 static const u16 bnxt_vf_req_snif[] = {
266 HWRM_CFA_L2_FILTER_ALLOC,
269 static const u16 bnxt_async_events_arr[] = {
270 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
271 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
272 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
273 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
274 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
275 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
276 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
277 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
278 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
279 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
280 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
281 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
282 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
283 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
284 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
287 static struct workqueue_struct *bnxt_pf_wq;
289 static bool bnxt_vf_pciid(enum board_idx idx)
291 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
292 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
293 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
294 idx == NETXTREME_E_P5_VF_HV);
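/* Illustrative sketch (not part of the driver): how probe code typically
 * consumes the driver_data stashed in bnxt_pci_tbl below/above.  The helper
 * name and flow here are hypothetical; the real handling lives in
 * bnxt_init_one(), which also uses the board-name table above for logging.
 */
static inline bool bnxt_example_ent_is_vf(const struct pci_device_id *ent)
{
	enum board_idx idx = (enum board_idx)ent->driver_data;

	return bnxt_vf_pciid(idx);	/* true for *_VF and *_VF_HV IDs */
}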
297 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
298 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
299 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
301 #define BNXT_CP_DB_IRQ_DIS(db) \
302 writel(DB_CP_IRQ_DIS_FLAGS, db)
304 #define BNXT_DB_CQ(db, idx) \
305 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
307 #define BNXT_DB_NQ_P5(db, idx) \
308 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
311 #define BNXT_DB_CQ_ARM(db, idx) \
312 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
314 #define BNXT_DB_NQ_ARM_P5(db, idx) \
315 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
318 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
320 if (bp->flags & BNXT_FLAG_CHIP_P5)
321 BNXT_DB_NQ_P5(db, idx);
326 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
328 if (bp->flags & BNXT_FLAG_CHIP_P5)
329 BNXT_DB_NQ_ARM_P5(db, idx);
331 BNXT_DB_CQ_ARM(db, idx);
334 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
336 if (bp->flags & BNXT_FLAG_CHIP_P5)
337 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
338 RING_CMP(idx), db->doorbell);
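/* Illustrative sketch (not part of the driver): how a poll loop typically
 * uses the doorbell helpers above.  While work remains it only advances the
 * consumer index (interrupt stays masked); once polling completes it
 * re-arms the interrupt.  The helper name and flow are hypothetical
 * simplifications of the real bnxt_poll()/bnxt_poll_p5() logic.
 */
static inline void bnxt_example_nq_db(struct bnxt *bp,
				      struct bnxt_cp_ring_info *cpr,
				      bool poll_done)
{
	if (poll_done)
		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
	else
		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
}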
343 const u16 bnxt_lhint_arr[] = {
344 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
345 TX_BD_FLAGS_LHINT_512_TO_1023,
346 TX_BD_FLAGS_LHINT_1024_TO_2047,
347 TX_BD_FLAGS_LHINT_1024_TO_2047,
348 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
349 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
350 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
351 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
352 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
353 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
354 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
355 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
356 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
357 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
358 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
359 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
360 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
361 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
362 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
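/* Illustrative sketch (not part of the driver): how the table above is
 * indexed.  The xmit path works in 512-byte buckets (length >> 9); bucket 0
 * maps to "512 and smaller", buckets 2 and 3 to "1024 to 2047", and buckets
 * 4 and up to "2048 and larger".  The helper name is hypothetical, and the
 * real xmit path drops packets whose bucket falls outside the table rather
 * than clamping as the sketch does.
 */
static inline u32 bnxt_example_lhint(unsigned int len)
{
	unsigned int bucket = len >> 9;		/* 512-byte buckets */

	if (bucket >= ARRAY_SIZE(bnxt_lhint_arr))
		return TX_BD_FLAGS_LHINT_2048_AND_LARGER; /* clamp for sketch */
	return bnxt_lhint_arr[bucket];
}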
365 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
367 struct metadata_dst *md_dst = skb_metadata_dst(skb);
369 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
372 return md_dst->u.port_info.port_id;
375 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
378 bnxt_db_write(bp, &txr->tx_db, prod);
379 txr->kick_pending = 0;
382 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
383 struct bnxt_tx_ring_info *txr,
384 struct netdev_queue *txq)
386 netif_tx_stop_queue(txq);
388 /* netif_tx_stop_queue() must be done before checking
389 * tx index in bnxt_tx_avail() below, because in
390 * bnxt_tx_int(), we update tx index before checking for
391 * netif_tx_queue_stopped().
394 if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
395 netif_tx_wake_queue(txq);
402 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
404 struct bnxt *bp = netdev_priv(dev);
406 struct tx_bd_ext *txbd1;
407 struct netdev_queue *txq;
410 unsigned int length, pad = 0;
411 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
413 struct pci_dev *pdev = bp->pdev;
414 struct bnxt_tx_ring_info *txr;
415 struct bnxt_sw_tx_bd *tx_buf;
418 i = skb_get_queue_mapping(skb);
419 if (unlikely(i >= bp->tx_nr_rings)) {
420 dev_kfree_skb_any(skb);
421 atomic_long_inc(&dev->tx_dropped);
425 txq = netdev_get_tx_queue(dev, i);
426 txr = &bp->tx_ring[bp->tx_ring_map[i]];
429 free_size = bnxt_tx_avail(bp, txr);
430 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
431 /* We must have raced with NAPI cleanup */
432 if (net_ratelimit() && txr->kick_pending)
433 netif_warn(bp, tx_err, dev,
434 "bnxt: ring busy w/ flush pending!\n");
435 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
436 return NETDEV_TX_BUSY;
440 len = skb_headlen(skb);
441 last_frag = skb_shinfo(skb)->nr_frags;
443 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
445 txbd->tx_bd_opaque = prod;
447 tx_buf = &txr->tx_buf_ring[prod];
449 tx_buf->nr_frags = last_frag;
452 cfa_action = bnxt_xmit_get_cfa_action(skb);
453 if (skb_vlan_tag_present(skb)) {
454 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
455 skb_vlan_tag_get(skb);
456 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
457 * QinQ1, QinQ2, QinQ3 VLAN headers are deprecated
459 if (skb->vlan_proto == htons(ETH_P_8021Q))
460 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
463 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
464 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
466 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
467 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
468 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
471 ptp->tx_hdr_off += VLAN_HLEN;
472 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
473 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
475 atomic_inc(&bp->ptp_cfg->tx_avail);
480 if (unlikely(skb->no_fcs))
481 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
483 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
485 struct tx_push_buffer *tx_push_buf = txr->tx_push;
486 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
487 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
488 void __iomem *db = txr->tx_db.doorbell;
489 void *pdata = tx_push_buf->data;
493 /* Set COAL_NOW to be ready quickly for the next push */
494 tx_push->tx_bd_len_flags_type =
495 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
496 TX_BD_TYPE_LONG_TX_BD |
497 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
498 TX_BD_FLAGS_COAL_NOW |
499 TX_BD_FLAGS_PACKET_END |
500 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
502 if (skb->ip_summed == CHECKSUM_PARTIAL)
503 tx_push1->tx_bd_hsize_lflags =
504 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
506 tx_push1->tx_bd_hsize_lflags = 0;
508 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
509 tx_push1->tx_bd_cfa_action =
510 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
512 end = pdata + length;
513 end = PTR_ALIGN(end, 8) - 1;
516 skb_copy_from_linear_data(skb, pdata, len);
518 for (j = 0; j < last_frag; j++) {
519 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
522 fptr = skb_frag_address_safe(frag);
526 memcpy(pdata, fptr, skb_frag_size(frag));
527 pdata += skb_frag_size(frag);
530 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
531 txbd->tx_bd_haddr = txr->data_mapping;
532 prod = NEXT_TX(prod);
533 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
534 memcpy(txbd, tx_push1, sizeof(*txbd));
535 prod = NEXT_TX(prod);
537 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
541 netdev_tx_sent_queue(txq, skb->len);
542 wmb(); /* Sync is_push and byte queue before pushing data */
544 push_len = (length + sizeof(*tx_push) + 7) / 8;
546 __iowrite64_copy(db, tx_push_buf, 16);
547 __iowrite32_copy(db + 4, tx_push_buf + 1,
548 (push_len - 16) << 1);
550 __iowrite64_copy(db, tx_push_buf, push_len);
557 if (length < BNXT_MIN_PKT_SIZE) {
558 pad = BNXT_MIN_PKT_SIZE - length;
559 if (skb_pad(skb, pad))
560 /* SKB already freed. */
561 goto tx_kick_pending;
562 length = BNXT_MIN_PKT_SIZE;
565 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
567 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
570 dma_unmap_addr_set(tx_buf, mapping, mapping);
571 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
572 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
574 txbd->tx_bd_haddr = cpu_to_le64(mapping);
576 prod = NEXT_TX(prod);
577 txbd1 = (struct tx_bd_ext *)
578 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
580 txbd1->tx_bd_hsize_lflags = lflags;
581 if (skb_is_gso(skb)) {
584 if (skb->encapsulation)
585 hdr_len = skb_inner_network_offset(skb) +
586 skb_inner_network_header_len(skb) +
587 inner_tcp_hdrlen(skb);
589 hdr_len = skb_transport_offset(skb) +
592 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
594 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
595 length = skb_shinfo(skb)->gso_size;
596 txbd1->tx_bd_mss = cpu_to_le32(length);
598 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
599 txbd1->tx_bd_hsize_lflags |=
600 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
601 txbd1->tx_bd_mss = 0;
605 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
606 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
611 flags |= bnxt_lhint_arr[length];
612 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
614 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
615 txbd1->tx_bd_cfa_action =
616 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
617 for (i = 0; i < last_frag; i++) {
618 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
620 prod = NEXT_TX(prod);
621 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
623 len = skb_frag_size(frag);
624 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
627 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
630 tx_buf = &txr->tx_buf_ring[prod];
631 dma_unmap_addr_set(tx_buf, mapping, mapping);
633 txbd->tx_bd_haddr = cpu_to_le64(mapping);
635 flags = len << TX_BD_LEN_SHIFT;
636 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
640 txbd->tx_bd_len_flags_type =
641 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
642 TX_BD_FLAGS_PACKET_END);
644 netdev_tx_sent_queue(txq, skb->len);
646 skb_tx_timestamp(skb);
648 /* Sync BD data before updating doorbell */
651 prod = NEXT_TX(prod);
654 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
655 bnxt_txr_db_kick(bp, txr, prod);
657 txr->kick_pending = 1;
661 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
662 if (netdev_xmit_more() && !tx_buf->is_push)
663 bnxt_txr_db_kick(bp, txr, prod);
665 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
670 if (BNXT_TX_PTP_IS_SET(lflags))
671 atomic_inc(&bp->ptp_cfg->tx_avail);
675 /* start back at the beginning and unmap the skb */
677 tx_buf = &txr->tx_buf_ring[prod];
678 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
679 skb_headlen(skb), DMA_TO_DEVICE);
680 prod = NEXT_TX(prod);
682 /* unmap remaining mapped pages */
683 for (i = 0; i < last_frag; i++) {
684 prod = NEXT_TX(prod);
685 tx_buf = &txr->tx_buf_ring[prod];
686 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
687 skb_frag_size(&skb_shinfo(skb)->frags[i]),
692 dev_kfree_skb_any(skb);
694 if (txr->kick_pending)
695 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
696 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
697 atomic_long_inc(&dev->tx_dropped);
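/* Illustrative sketch (not part of the driver): descriptor budget for one
 * skb on the normal (non-push) TX path.  Each packet uses one long TX BD
 * for the linear data, one extended BD for offload metadata (VLAN, LSO,
 * checksum, cfa_action) and one BD per page fragment, which is why
 * bnxt_start_xmit() checks for nr_frags + 2 free slots and encodes
 * last_frag + 2 via TX_BD_FLAGS_BD_CNT_SHIFT.  The helper name is
 * hypothetical.
 */
static inline unsigned int bnxt_example_tx_bds_needed(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags + 2;
}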
701 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
703 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
704 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
705 u16 cons = txr->tx_cons;
706 struct pci_dev *pdev = bp->pdev;
708 unsigned int tx_bytes = 0;
710 for (i = 0; i < nr_pkts; i++) {
711 struct bnxt_sw_tx_bd *tx_buf;
712 bool compl_deferred = false;
716 tx_buf = &txr->tx_buf_ring[cons];
717 cons = NEXT_TX(cons);
721 if (tx_buf->is_push) {
726 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
727 skb_headlen(skb), DMA_TO_DEVICE);
728 last = tx_buf->nr_frags;
730 for (j = 0; j < last; j++) {
731 cons = NEXT_TX(cons);
732 tx_buf = &txr->tx_buf_ring[cons];
735 dma_unmap_addr(tx_buf, mapping),
736 skb_frag_size(&skb_shinfo(skb)->frags[j]),
739 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
740 if (bp->flags & BNXT_FLAG_CHIP_P5) {
741 if (!bnxt_get_tx_ts_p5(bp, skb))
742 compl_deferred = true;
744 atomic_inc(&bp->ptp_cfg->tx_avail);
749 cons = NEXT_TX(cons);
751 tx_bytes += skb->len;
753 dev_kfree_skb_any(skb);
756 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
759 /* Need to make the tx_cons update visible to bnxt_start_xmit()
760 * before checking for netif_tx_queue_stopped(). Without the
761 * memory barrier, there is a small possibility that bnxt_start_xmit()
762 * will miss it and cause the queue to be stopped forever.
766 if (unlikely(netif_tx_queue_stopped(txq)) &&
767 bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
768 READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
769 netif_tx_wake_queue(txq);
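/* Illustrative sketch (not part of the driver): the byte queue limit (BQL)
 * pairing used by this driver.  bnxt_start_xmit() reports every queued skb
 * with netdev_tx_sent_queue(), and bnxt_tx_int() returns the same counts
 * with netdev_tx_completed_queue() (batched above as nr_pkts/tx_bytes);
 * keeping the two in sync is what lets BQL throttle the TX queue.  The
 * helper name is hypothetical.
 */
static inline void bnxt_example_bql_pair(struct netdev_queue *txq,
					 struct sk_buff *skb)
{
	netdev_tx_sent_queue(txq, skb->len);		/* xmit side */
	netdev_tx_completed_queue(txq, 1, skb->len);	/* completion side */
}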
772 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
773 struct bnxt_rx_ring_info *rxr,
776 struct device *dev = &bp->pdev->dev;
779 page = page_pool_dev_alloc_pages(rxr->page_pool);
783 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
784 DMA_ATTR_WEAK_ORDERING);
785 if (dma_mapping_error(dev, *mapping)) {
786 page_pool_recycle_direct(rxr->page_pool, page);
789 *mapping += bp->rx_dma_offset;
793 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
797 struct pci_dev *pdev = bp->pdev;
799 data = kmalloc(bp->rx_buf_size, gfp);
803 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
804 bp->rx_buf_use_size, bp->rx_dir,
805 DMA_ATTR_WEAK_ORDERING);
807 if (dma_mapping_error(&pdev->dev, *mapping)) {
814 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
817 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
818 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
821 if (BNXT_RX_PAGE_MODE(bp)) {
823 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
829 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
831 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
837 rx_buf->data_ptr = data + bp->rx_offset;
839 rx_buf->mapping = mapping;
841 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
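/* Illustrative sketch (not part of the driver): the software and hardware
 * state that bnxt_alloc_rx_data() must leave behind for one producer slot,
 * whether page mode (XDP) or kmalloc'd data buffers are in use.  The helper
 * name is hypothetical.
 */
static inline void bnxt_example_fill_rx_slot(struct rx_bd *rxbd,
					     struct bnxt_sw_rx_bd *rx_buf,
					     void *data, u8 *data_ptr,
					     dma_addr_t mapping)
{
	rx_buf->data = data;			/* page or data buffer */
	rx_buf->data_ptr = data_ptr;		/* where the packet starts */
	rx_buf->mapping = mapping;		/* saved for the later unmap */
	rxbd->rx_bd_haddr = cpu_to_le64(mapping); /* what the NIC DMAs into */
}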
845 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
847 u16 prod = rxr->rx_prod;
848 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
849 struct rx_bd *cons_bd, *prod_bd;
851 prod_rx_buf = &rxr->rx_buf_ring[prod];
852 cons_rx_buf = &rxr->rx_buf_ring[cons];
854 prod_rx_buf->data = data;
855 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
857 prod_rx_buf->mapping = cons_rx_buf->mapping;
859 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
860 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
862 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
865 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
867 u16 next, max = rxr->rx_agg_bmap_size;
869 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
871 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
875 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
876 struct bnxt_rx_ring_info *rxr,
880 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
881 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
882 struct pci_dev *pdev = bp->pdev;
885 u16 sw_prod = rxr->rx_sw_agg_prod;
886 unsigned int offset = 0;
888 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
891 page = alloc_page(gfp);
895 rxr->rx_page_offset = 0;
897 offset = rxr->rx_page_offset;
898 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
899 if (rxr->rx_page_offset == PAGE_SIZE)
904 page = alloc_page(gfp);
909 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
910 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
911 DMA_ATTR_WEAK_ORDERING);
912 if (dma_mapping_error(&pdev->dev, mapping)) {
917 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
918 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
920 __set_bit(sw_prod, rxr->rx_agg_bmap);
921 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
922 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
924 rx_agg_buf->page = page;
925 rx_agg_buf->offset = offset;
926 rx_agg_buf->mapping = mapping;
927 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
928 rxbd->rx_bd_opaque = sw_prod;
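/* Illustrative sketch (not part of the driver): on architectures where
 * PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE, the aggregation allocator
 * above carves one page into BNXT_RX_PAGE_SIZE slices and only allocates a
 * fresh page once rx_page_offset wraps.  The helper name is hypothetical.
 */
static inline unsigned int bnxt_example_agg_slices_per_page(void)
{
	return PAGE_SIZE / BNXT_RX_PAGE_SIZE;	/* 1 when the sizes match */
}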
932 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
933 struct bnxt_cp_ring_info *cpr,
934 u16 cp_cons, u16 curr)
936 struct rx_agg_cmp *agg;
938 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
939 agg = (struct rx_agg_cmp *)
940 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
944 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
945 struct bnxt_rx_ring_info *rxr,
946 u16 agg_id, u16 curr)
948 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
950 return &tpa_info->agg_arr[curr];
953 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
954 u16 start, u32 agg_bufs, bool tpa)
956 struct bnxt_napi *bnapi = cpr->bnapi;
957 struct bnxt *bp = bnapi->bp;
958 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
959 u16 prod = rxr->rx_agg_prod;
960 u16 sw_prod = rxr->rx_sw_agg_prod;
964 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
967 for (i = 0; i < agg_bufs; i++) {
969 struct rx_agg_cmp *agg;
970 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
971 struct rx_bd *prod_bd;
975 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
977 agg = bnxt_get_agg(bp, cpr, idx, start + i);
978 cons = agg->rx_agg_cmp_opaque;
979 __clear_bit(cons, rxr->rx_agg_bmap);
981 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
982 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
984 __set_bit(sw_prod, rxr->rx_agg_bmap);
985 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
986 cons_rx_buf = &rxr->rx_agg_ring[cons];
988 /* It is possible for sw_prod to be equal to cons, so
989 * set cons_rx_buf->page to NULL first.
991 page = cons_rx_buf->page;
992 cons_rx_buf->page = NULL;
993 prod_rx_buf->page = page;
994 prod_rx_buf->offset = cons_rx_buf->offset;
996 prod_rx_buf->mapping = cons_rx_buf->mapping;
998 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1000 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1001 prod_bd->rx_bd_opaque = sw_prod;
1003 prod = NEXT_RX_AGG(prod);
1004 sw_prod = NEXT_RX_AGG(sw_prod);
1006 rxr->rx_agg_prod = prod;
1007 rxr->rx_sw_agg_prod = sw_prod;
1010 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1011 struct bnxt_rx_ring_info *rxr,
1012 u16 cons, void *data, u8 *data_ptr,
1013 dma_addr_t dma_addr,
1014 unsigned int offset_and_len)
1016 unsigned int payload = offset_and_len >> 16;
1017 unsigned int len = offset_and_len & 0xffff;
1019 struct page *page = data;
1020 u16 prod = rxr->rx_prod;
1021 struct sk_buff *skb;
1024 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1025 if (unlikely(err)) {
1026 bnxt_reuse_rx_data(rxr, cons, data);
1029 dma_addr -= bp->rx_dma_offset;
1030 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
1031 DMA_ATTR_WEAK_ORDERING);
1032 page_pool_release_page(rxr->page_pool, page);
1034 if (unlikely(!payload))
1035 payload = eth_get_headlen(bp->dev, data_ptr, len);
1037 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1043 off = (void *)data_ptr - page_address(page);
1044 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1045 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1046 payload + NET_IP_ALIGN);
1048 frag = &skb_shinfo(skb)->frags[0];
1049 skb_frag_size_sub(frag, payload);
1050 skb_frag_off_add(frag, payload);
1051 skb->data_len -= payload;
1052 skb->tail += payload;
1057 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1058 struct bnxt_rx_ring_info *rxr, u16 cons,
1059 void *data, u8 *data_ptr,
1060 dma_addr_t dma_addr,
1061 unsigned int offset_and_len)
1063 u16 prod = rxr->rx_prod;
1064 struct sk_buff *skb;
1067 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1068 if (unlikely(err)) {
1069 bnxt_reuse_rx_data(rxr, cons, data);
1073 skb = build_skb(data, 0);
1074 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1075 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1081 skb_reserve(skb, bp->rx_offset);
1082 skb_put(skb, offset_and_len & 0xffff);
1086 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1087 struct bnxt_cp_ring_info *cpr,
1088 struct sk_buff *skb, u16 idx,
1089 u32 agg_bufs, bool tpa)
1091 struct bnxt_napi *bnapi = cpr->bnapi;
1092 struct pci_dev *pdev = bp->pdev;
1093 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1094 u16 prod = rxr->rx_agg_prod;
1095 bool p5_tpa = false;
1098 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1101 for (i = 0; i < agg_bufs; i++) {
1103 struct rx_agg_cmp *agg;
1104 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1109 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1111 agg = bnxt_get_agg(bp, cpr, idx, i);
1112 cons = agg->rx_agg_cmp_opaque;
1113 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1114 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1116 cons_rx_buf = &rxr->rx_agg_ring[cons];
1117 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1118 cons_rx_buf->offset, frag_len);
1119 __clear_bit(cons, rxr->rx_agg_bmap);
1121 /* It is possible for bnxt_alloc_rx_page() to allocate
1122 * a sw_prod index that equals the cons index, so we
1123 * need to clear the cons entry now.
1125 mapping = cons_rx_buf->mapping;
1126 page = cons_rx_buf->page;
1127 cons_rx_buf->page = NULL;
1129 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1130 struct skb_shared_info *shinfo;
1131 unsigned int nr_frags;
1133 shinfo = skb_shinfo(skb);
1134 nr_frags = --shinfo->nr_frags;
1135 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1139 cons_rx_buf->page = page;
1141 /* Update prod since possibly some pages have been
1142 * allocated already.
1144 rxr->rx_agg_prod = prod;
1145 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1149 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1151 DMA_ATTR_WEAK_ORDERING);
1153 skb->data_len += frag_len;
1154 skb->len += frag_len;
1155 skb->truesize += PAGE_SIZE;
1157 prod = NEXT_RX_AGG(prod);
1159 rxr->rx_agg_prod = prod;
1163 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1164 u8 agg_bufs, u32 *raw_cons)
1167 struct rx_agg_cmp *agg;
1169 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1170 last = RING_CMP(*raw_cons);
1171 agg = (struct rx_agg_cmp *)
1172 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1173 return RX_AGG_CMP_VALID(agg, *raw_cons);
1176 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1180 struct bnxt *bp = bnapi->bp;
1181 struct pci_dev *pdev = bp->pdev;
1182 struct sk_buff *skb;
1184 skb = napi_alloc_skb(&bnapi->napi, len);
1188 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1191 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1192 len + NET_IP_ALIGN);
1194 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1201 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1202 u32 *raw_cons, void *cmp)
1204 struct rx_cmp *rxcmp = cmp;
1205 u32 tmp_raw_cons = *raw_cons;
1206 u8 cmp_type, agg_bufs = 0;
1208 cmp_type = RX_CMP_TYPE(rxcmp);
1210 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1211 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1213 RX_CMP_AGG_BUFS_SHIFT;
1214 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1215 struct rx_tpa_end_cmp *tpa_end = cmp;
1217 if (bp->flags & BNXT_FLAG_CHIP_P5)
1220 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1224 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1227 *raw_cons = tmp_raw_cons;
1231 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1233 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1237 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1239 schedule_delayed_work(&bp->fw_reset_task, delay);
1242 static void bnxt_queue_sp_work(struct bnxt *bp)
1245 queue_work(bnxt_pf_wq, &bp->sp_task);
1247 schedule_work(&bp->sp_task);
1250 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1252 if (!rxr->bnapi->in_reset) {
1253 rxr->bnapi->in_reset = true;
1254 if (bp->flags & BNXT_FLAG_CHIP_P5)
1255 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1257 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1258 bnxt_queue_sp_work(bp);
1260 rxr->rx_next_cons = 0xffff;
1263 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1265 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1266 u16 idx = agg_id & MAX_TPA_P5_MASK;
1268 if (test_bit(idx, map->agg_idx_bmap))
1269 idx = find_first_zero_bit(map->agg_idx_bmap,
1270 BNXT_AGG_IDX_BMAP_SIZE);
1271 __set_bit(idx, map->agg_idx_bmap);
1272 map->agg_id_tbl[agg_id] = idx;
1276 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1278 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1280 __clear_bit(idx, map->agg_idx_bmap);
1283 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1285 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1287 return map->agg_id_tbl[agg_id];
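/* Illustrative sketch (not part of the driver): lifecycle of a P5 TPA
 * aggregation ID using the helpers above.  bnxt_tpa_start() maps the
 * hardware agg_id to a free software slot, bnxt_tpa_agg() and
 * bnxt_tpa_end() look that slot up while the aggregation is in flight, and
 * the slot is released at TPA end.  The flow below is a hypothetical
 * compression of those steps into one place.
 */
static inline void bnxt_example_agg_id_lifecycle(struct bnxt_rx_ring_info *rxr,
						 u16 hw_agg_id)
{
	u16 idx = bnxt_alloc_agg_idx(rxr, hw_agg_id);	/* TPA start */

	/* TPA agg/end completions resolve the same hardware ID */
	WARN_ON(bnxt_lookup_agg_idx(rxr, hw_agg_id) != idx);
	bnxt_free_agg_idx(rxr, idx);			/* TPA end */
}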
1290 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1291 struct rx_tpa_start_cmp *tpa_start,
1292 struct rx_tpa_start_cmp_ext *tpa_start1)
1294 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1295 struct bnxt_tpa_info *tpa_info;
1296 u16 cons, prod, agg_id;
1297 struct rx_bd *prod_bd;
1300 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1301 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1302 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1304 agg_id = TPA_START_AGG_ID(tpa_start);
1306 cons = tpa_start->rx_tpa_start_cmp_opaque;
1307 prod = rxr->rx_prod;
1308 cons_rx_buf = &rxr->rx_buf_ring[cons];
1309 prod_rx_buf = &rxr->rx_buf_ring[prod];
1310 tpa_info = &rxr->rx_tpa[agg_id];
1312 if (unlikely(cons != rxr->rx_next_cons ||
1313 TPA_START_ERROR(tpa_start))) {
1314 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1315 cons, rxr->rx_next_cons,
1316 TPA_START_ERROR_CODE(tpa_start1));
1317 bnxt_sched_reset(bp, rxr);
1320 /* Store cfa_code in tpa_info to use in tpa_end
1321 * completion processing.
1323 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1324 prod_rx_buf->data = tpa_info->data;
1325 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1327 mapping = tpa_info->mapping;
1328 prod_rx_buf->mapping = mapping;
1330 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1332 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1334 tpa_info->data = cons_rx_buf->data;
1335 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1336 cons_rx_buf->data = NULL;
1337 tpa_info->mapping = cons_rx_buf->mapping;
1340 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1341 RX_TPA_START_CMP_LEN_SHIFT;
1342 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1343 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1345 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1346 tpa_info->gso_type = SKB_GSO_TCPV4;
1347 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1348 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1349 tpa_info->gso_type = SKB_GSO_TCPV6;
1350 tpa_info->rss_hash =
1351 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1353 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1354 tpa_info->gso_type = 0;
1355 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1357 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1358 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1359 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1360 tpa_info->agg_count = 0;
1362 rxr->rx_prod = NEXT_RX(prod);
1363 cons = NEXT_RX(cons);
1364 rxr->rx_next_cons = NEXT_RX(cons);
1365 cons_rx_buf = &rxr->rx_buf_ring[cons];
1367 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1368 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1369 cons_rx_buf->data = NULL;
1372 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1375 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1379 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1381 struct udphdr *uh = NULL;
1383 if (ip_proto == htons(ETH_P_IP)) {
1384 struct iphdr *iph = (struct iphdr *)skb->data;
1386 if (iph->protocol == IPPROTO_UDP)
1387 uh = (struct udphdr *)(iph + 1);
1389 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1391 if (iph->nexthdr == IPPROTO_UDP)
1392 uh = (struct udphdr *)(iph + 1);
1396 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1398 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1403 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1404 int payload_off, int tcp_ts,
1405 struct sk_buff *skb)
1410 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1411 u32 hdr_info = tpa_info->hdr_info;
1412 bool loopback = false;
1414 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1415 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1416 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1418 /* If the packet is an internal loopback packet, the offsets will
1419 * have an extra 4 bytes.
1421 if (inner_mac_off == 4) {
1423 } else if (inner_mac_off > 4) {
1424 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1427 /* We only support inner IPv4/IPv6. If we don't see the
1428 * correct protocol ID, it must be a loopback packet where
1429 * the offsets are off by 4.
1431 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1435 /* internal loopback packet, subtract 4 from all offsets */
1441 nw_off = inner_ip_off - ETH_HLEN;
1442 skb_set_network_header(skb, nw_off);
1443 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1444 struct ipv6hdr *iph = ipv6_hdr(skb);
1446 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1447 len = skb->len - skb_transport_offset(skb);
1449 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1451 struct iphdr *iph = ip_hdr(skb);
1453 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1454 len = skb->len - skb_transport_offset(skb);
1456 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1459 if (inner_mac_off) { /* tunnel */
1460 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1463 bnxt_gro_tunnel(skb, proto);
1469 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1470 int payload_off, int tcp_ts,
1471 struct sk_buff *skb)
1474 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1475 u32 hdr_info = tpa_info->hdr_info;
1476 int iphdr_len, nw_off;
1478 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1479 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1480 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1482 nw_off = inner_ip_off - ETH_HLEN;
1483 skb_set_network_header(skb, nw_off);
1484 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1485 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1486 skb_set_transport_header(skb, nw_off + iphdr_len);
1488 if (inner_mac_off) { /* tunnel */
1489 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1492 bnxt_gro_tunnel(skb, proto);
1498 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1499 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1501 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1502 int payload_off, int tcp_ts,
1503 struct sk_buff *skb)
1507 int len, nw_off, tcp_opt_len = 0;
1512 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1515 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1517 skb_set_network_header(skb, nw_off);
1519 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1520 len = skb->len - skb_transport_offset(skb);
1522 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1523 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1524 struct ipv6hdr *iph;
1526 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1528 skb_set_network_header(skb, nw_off);
1529 iph = ipv6_hdr(skb);
1530 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1531 len = skb->len - skb_transport_offset(skb);
1533 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1535 dev_kfree_skb_any(skb);
1539 if (nw_off) /* tunnel */
1540 bnxt_gro_tunnel(skb, skb->protocol);
1545 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1546 struct bnxt_tpa_info *tpa_info,
1547 struct rx_tpa_end_cmp *tpa_end,
1548 struct rx_tpa_end_cmp_ext *tpa_end1,
1549 struct sk_buff *skb)
1555 segs = TPA_END_TPA_SEGS(tpa_end);
1559 NAPI_GRO_CB(skb)->count = segs;
1560 skb_shinfo(skb)->gso_size =
1561 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1562 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1563 if (bp->flags & BNXT_FLAG_CHIP_P5)
1564 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1566 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1567 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1569 tcp_gro_complete(skb);
1574 /* Given the cfa_code of a received packet, determine which
1575 * netdev (vf-rep or PF) the packet is destined to.
1577 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1579 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1581 /* if the vf-rep dev is NULL, the packet must belong to the PF */
1582 return dev ? dev : bp->dev;
1585 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1586 struct bnxt_cp_ring_info *cpr,
1588 struct rx_tpa_end_cmp *tpa_end,
1589 struct rx_tpa_end_cmp_ext *tpa_end1,
1592 struct bnxt_napi *bnapi = cpr->bnapi;
1593 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1594 u8 *data_ptr, agg_bufs;
1596 struct bnxt_tpa_info *tpa_info;
1598 struct sk_buff *skb;
1599 u16 idx = 0, agg_id;
1603 if (unlikely(bnapi->in_reset)) {
1604 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1607 return ERR_PTR(-EBUSY);
1611 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1612 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1613 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1614 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1615 tpa_info = &rxr->rx_tpa[agg_id];
1616 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1617 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1618 agg_bufs, tpa_info->agg_count);
1619 agg_bufs = tpa_info->agg_count;
1621 tpa_info->agg_count = 0;
1622 *event |= BNXT_AGG_EVENT;
1623 bnxt_free_agg_idx(rxr, agg_id);
1625 gro = !!(bp->flags & BNXT_FLAG_GRO);
1627 agg_id = TPA_END_AGG_ID(tpa_end);
1628 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1629 tpa_info = &rxr->rx_tpa[agg_id];
1630 idx = RING_CMP(*raw_cons);
1632 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1633 return ERR_PTR(-EBUSY);
1635 *event |= BNXT_AGG_EVENT;
1636 idx = NEXT_CMP(idx);
1638 gro = !!TPA_END_GRO(tpa_end);
1640 data = tpa_info->data;
1641 data_ptr = tpa_info->data_ptr;
1643 len = tpa_info->len;
1644 mapping = tpa_info->mapping;
1646 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1647 bnxt_abort_tpa(cpr, idx, agg_bufs);
1648 if (agg_bufs > MAX_SKB_FRAGS)
1649 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1650 agg_bufs, (int)MAX_SKB_FRAGS);
1654 if (len <= bp->rx_copy_thresh) {
1655 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1657 bnxt_abort_tpa(cpr, idx, agg_bufs);
1658 cpr->sw_stats.rx.rx_oom_discards += 1;
1663 dma_addr_t new_mapping;
1665 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1667 bnxt_abort_tpa(cpr, idx, agg_bufs);
1668 cpr->sw_stats.rx.rx_oom_discards += 1;
1672 tpa_info->data = new_data;
1673 tpa_info->data_ptr = new_data + bp->rx_offset;
1674 tpa_info->mapping = new_mapping;
1676 skb = build_skb(data, 0);
1677 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1678 bp->rx_buf_use_size, bp->rx_dir,
1679 DMA_ATTR_WEAK_ORDERING);
1683 bnxt_abort_tpa(cpr, idx, agg_bufs);
1684 cpr->sw_stats.rx.rx_oom_discards += 1;
1687 skb_reserve(skb, bp->rx_offset);
1692 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1694 /* Page reuse already handled by bnxt_rx_pages(). */
1695 cpr->sw_stats.rx.rx_oom_discards += 1;
1701 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1703 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1704 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1706 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1707 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1708 __be16 vlan_proto = htons(tpa_info->metadata >>
1709 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1710 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1712 if (eth_type_vlan(vlan_proto)) {
1713 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1720 skb_checksum_none_assert(skb);
1721 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1722 skb->ip_summed = CHECKSUM_UNNECESSARY;
1724 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1728 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1733 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1734 struct rx_agg_cmp *rx_agg)
1736 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1737 struct bnxt_tpa_info *tpa_info;
1739 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1740 tpa_info = &rxr->rx_tpa[agg_id];
1741 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1742 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1745 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1746 struct sk_buff *skb)
1748 if (skb->dev != bp->dev) {
1749 /* this packet belongs to a vf-rep */
1750 bnxt_vf_rep_rx(bp, skb);
1753 skb_record_rx_queue(skb, bnapi->index);
1754 napi_gro_receive(&bnapi->napi, skb);
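/* Illustrative sketch (not part of the driver): the copy-break decision
 * shared by the normal RX and TPA-end paths below.  Packets up to
 * bp->rx_copy_thresh bytes (BNXT_RX_COPY_THRESH by default) are copied via
 * bnxt_copy_skb() so the original DMA buffer can be reused immediately;
 * larger packets take the zero-copy build_skb()/page route.  The helper
 * name is hypothetical.
 */
static inline bool bnxt_example_use_copy_break(struct bnxt *bp,
					       unsigned int len)
{
	return len <= bp->rx_copy_thresh;
}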
1757 /* returns the following:
1758 * 1 - 1 packet successfully received
1759 * 0 - successful TPA_START, packet not completed yet
1760 * -EBUSY - completion ring does not have all the agg buffers yet
1761 * -ENOMEM - packet aborted due to out of memory
1762 * -EIO - packet aborted due to hw error indicated in BD
1764 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1765 u32 *raw_cons, u8 *event)
1767 struct bnxt_napi *bnapi = cpr->bnapi;
1768 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1769 struct net_device *dev = bp->dev;
1770 struct rx_cmp *rxcmp;
1771 struct rx_cmp_ext *rxcmp1;
1772 u32 tmp_raw_cons = *raw_cons;
1773 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1774 struct bnxt_sw_rx_bd *rx_buf;
1776 u8 *data_ptr, agg_bufs, cmp_type;
1777 dma_addr_t dma_addr;
1778 struct sk_buff *skb;
1783 rxcmp = (struct rx_cmp *)
1784 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1786 cmp_type = RX_CMP_TYPE(rxcmp);
1788 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1789 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1790 goto next_rx_no_prod_no_len;
1793 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1794 cp_cons = RING_CMP(tmp_raw_cons);
1795 rxcmp1 = (struct rx_cmp_ext *)
1796 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1798 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1801 /* The validity check of the entry must be done before
1802 * reading any further.
1805 prod = rxr->rx_prod;
1807 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1808 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1809 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1811 *event |= BNXT_RX_EVENT;
1812 goto next_rx_no_prod_no_len;
1814 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1815 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1816 (struct rx_tpa_end_cmp *)rxcmp,
1817 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1824 bnxt_deliver_skb(bp, bnapi, skb);
1827 *event |= BNXT_RX_EVENT;
1828 goto next_rx_no_prod_no_len;
1831 cons = rxcmp->rx_cmp_opaque;
1832 if (unlikely(cons != rxr->rx_next_cons)) {
1833 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1835 /* 0xffff is a forced error, don't print it */
1836 if (rxr->rx_next_cons != 0xffff)
1837 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1838 cons, rxr->rx_next_cons);
1839 bnxt_sched_reset(bp, rxr);
1842 goto next_rx_no_prod_no_len;
1844 rx_buf = &rxr->rx_buf_ring[cons];
1845 data = rx_buf->data;
1846 data_ptr = rx_buf->data_ptr;
1849 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1850 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1853 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1856 cp_cons = NEXT_CMP(cp_cons);
1857 *event |= BNXT_AGG_EVENT;
1859 *event |= BNXT_RX_EVENT;
1861 rx_buf->data = NULL;
1862 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1863 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1865 bnxt_reuse_rx_data(rxr, cons, data);
1867 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1871 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1872 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1873 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1874 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1875 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1877 bnxt_sched_reset(bp, rxr);
1880 goto next_rx_no_len;
1883 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1884 len = flags >> RX_CMP_LEN_SHIFT;
1885 dma_addr = rx_buf->mapping;
1887 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1892 if (len <= bp->rx_copy_thresh) {
1893 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1894 bnxt_reuse_rx_data(rxr, cons, data);
1897 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1899 cpr->sw_stats.rx.rx_oom_discards += 1;
1906 if (rx_buf->data_ptr == data_ptr)
1907 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1910 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1913 cpr->sw_stats.rx.rx_oom_discards += 1;
1920 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1922 cpr->sw_stats.rx.rx_oom_discards += 1;
1928 if (RX_CMP_HASH_VALID(rxcmp)) {
1929 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1930 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1932 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1933 if (hash_type != 1 && hash_type != 3)
1934 type = PKT_HASH_TYPE_L3;
1935 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1938 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1939 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1941 if ((rxcmp1->rx_cmp_flags2 &
1942 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1943 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1944 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1945 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1946 __be16 vlan_proto = htons(meta_data >>
1947 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1949 if (eth_type_vlan(vlan_proto)) {
1950 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1957 skb_checksum_none_assert(skb);
1958 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1959 if (dev->features & NETIF_F_RXCSUM) {
1960 skb->ip_summed = CHECKSUM_UNNECESSARY;
1961 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1964 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1965 if (dev->features & NETIF_F_RXCSUM)
1966 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1970 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1971 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1972 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1973 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1976 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1977 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1979 spin_lock_bh(&ptp->ptp_lock);
1980 ns = timecounter_cyc2time(&ptp->tc, ts);
1981 spin_unlock_bh(&ptp->ptp_lock);
1982 memset(skb_hwtstamps(skb), 0,
1983 sizeof(*skb_hwtstamps(skb)));
1984 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1988 bnxt_deliver_skb(bp, bnapi, skb);
1992 cpr->rx_packets += 1;
1993 cpr->rx_bytes += len;
1996 rxr->rx_prod = NEXT_RX(prod);
1997 rxr->rx_next_cons = NEXT_RX(cons);
1999 next_rx_no_prod_no_len:
2000 *raw_cons = tmp_raw_cons;
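/* Illustrative sketch (not part of the driver): how a poll loop typically
 * consumes bnxt_rx_pkt() return codes (see the list above the function).
 * The flow is a hypothetical simplification of __bnxt_poll_work(); only
 * bnxt_rx_pkt() itself is a real driver call here.
 */
static inline int bnxt_example_consume_rx(struct bnxt *bp,
					  struct bnxt_cp_ring_info *cpr,
					  u32 *raw_cons, u8 *event)
{
	int rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);

	if (rc == -EBUSY)	/* ring does not yet have all agg buffers */
		return 0;	/* stop and retry on the next poll */
	return rc == 1;		/* 1: packet delivered; 0/-ENOMEM/-EIO: none */
}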
2005 /* In netpoll mode, if we are using a combined completion ring, we need to
2006 * discard the rx packets and recycle the buffers.
2008 static int bnxt_force_rx_discard(struct bnxt *bp,
2009 struct bnxt_cp_ring_info *cpr,
2010 u32 *raw_cons, u8 *event)
2012 u32 tmp_raw_cons = *raw_cons;
2013 struct rx_cmp_ext *rxcmp1;
2014 struct rx_cmp *rxcmp;
2019 cp_cons = RING_CMP(tmp_raw_cons);
2020 rxcmp = (struct rx_cmp *)
2021 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2023 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2024 cp_cons = RING_CMP(tmp_raw_cons);
2025 rxcmp1 = (struct rx_cmp_ext *)
2026 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2028 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2031 /* The validity check of the entry must be done before
2032 * reading any further.
2035 cmp_type = RX_CMP_TYPE(rxcmp);
2036 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2037 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2038 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2039 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2040 struct rx_tpa_end_cmp_ext *tpa_end1;
2042 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2043 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2044 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2046 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2047 if (rc && rc != -EBUSY)
2048 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2052 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2054 struct bnxt_fw_health *fw_health = bp->fw_health;
2055 u32 reg = fw_health->regs[reg_idx];
2056 u32 reg_type, reg_off, val = 0;
2058 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2059 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2061 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2062 pci_read_config_dword(bp->pdev, reg_off, &val);
2064 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2065 reg_off = fw_health->mapped_regs[reg_idx];
2067 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2068 val = readl(bp->bar0 + reg_off);
2070 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2071 val = readl(bp->bar1 + reg_off);
2074 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2075 val &= fw_health->fw_reset_inprog_reg_mask;
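/* Illustrative sketch (not part of the driver): a firmware health register
 * descriptor packs an address-space selector and an offset into one 32-bit
 * word, which bnxt_fw_health_readl() splits before choosing a config-space,
 * GRC window or BAR read.  The helper name is hypothetical.
 */
static inline u32 bnxt_example_health_reg_decode(u32 reg, u32 *reg_type)
{
	*reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);	/* CFG/GRC/BAR0/BAR1 */
	return BNXT_FW_HEALTH_REG_OFF(reg);		/* offset in that space */
}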
2079 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2083 for (i = 0; i < bp->rx_nr_rings; i++) {
2084 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2085 struct bnxt_ring_grp_info *grp_info;
2087 grp_info = &bp->grp_info[grp_idx];
2088 if (grp_info->agg_fw_ring_id == ring_id)
2091 return INVALID_HW_RING_ID;
2094 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2096 switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
2097 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2098 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2099 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2102 netdev_err(bp->dev, "FW reported unknown error type\n");
2107 #define BNXT_GET_EVENT_PORT(data) \
2109 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2111 #define BNXT_EVENT_RING_TYPE(data2) \
2113 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2115 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2116 (BNXT_EVENT_RING_TYPE(data2) == \
2117 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
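/* Illustrative sketch (not part of the driver): BNXT_EVENT_RING_TYPE_RX()
 * applied to an async event's data2 word to see whether a RING_MONITOR_MSG
 * event refers to an RX ring.  The helper name is hypothetical.
 */
static inline bool bnxt_example_is_rx_ring_monitor(u32 data2)
{
	return BNXT_EVENT_RING_TYPE_RX(data2);
}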
2119 static int bnxt_async_event_process(struct bnxt *bp,
2120 struct hwrm_async_event_cmpl *cmpl)
2122 u16 event_id = le16_to_cpu(cmpl->event_id);
2123 u32 data1 = le32_to_cpu(cmpl->event_data1);
2124 u32 data2 = le32_to_cpu(cmpl->event_data2);
2126 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2128 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2129 struct bnxt_link_info *link_info = &bp->link_info;
2132 goto async_event_process_exit;
2134 /* print unsupported speed warning in forced speed mode only */
2135 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2136 (data1 & 0x20000)) {
2137 u16 fw_speed = link_info->force_link_speed;
2138 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2140 if (speed != SPEED_UNKNOWN)
2141 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2144 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2147 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2148 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2149 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2151 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2152 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2154 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2155 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2157 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2158 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2163 if (bp->pf.port_id != port_id)
2166 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2169 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2171 goto async_event_process_exit;
2172 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2174 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2175 char *fatal_str = "non-fatal";
2178 goto async_event_process_exit;
2180 bp->fw_reset_timestamp = jiffies;
2181 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2182 if (!bp->fw_reset_min_dsecs)
2183 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2184 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2185 if (!bp->fw_reset_max_dsecs)
2186 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2187 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2188 fatal_str = "fatal";
2189 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2191 netif_warn(bp, hw, bp->dev,
2192 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2193 fatal_str, data1, data2,
2194 bp->fw_reset_min_dsecs * 100,
2195 bp->fw_reset_max_dsecs * 100);
2196 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2199 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2200 struct bnxt_fw_health *fw_health = bp->fw_health;
2203 goto async_event_process_exit;
2205 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2206 fw_health->enabled = false;
2207 netif_info(bp, drv, bp->dev,
2208 "Error recovery info: error recovery[0]\n");
2211 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2212 fw_health->tmr_multiplier =
2213 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2214 bp->current_interval * 10);
2215 fw_health->tmr_counter = fw_health->tmr_multiplier;
2216 if (!fw_health->enabled)
2217 fw_health->last_fw_heartbeat =
2218 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2219 fw_health->last_fw_reset_cnt =
2220 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2221 netif_info(bp, drv, bp->dev,
2222 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2223 fw_health->master, fw_health->last_fw_reset_cnt,
2224 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2225 if (!fw_health->enabled) {
2226 /* Make sure tmr_counter is set and visible to
2227 * bnxt_health_check() before setting enabled to true.
2230 fw_health->enabled = true;
2232 goto async_event_process_exit;
2234 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2235 netif_notice(bp, hw, bp->dev,
2236 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2238 goto async_event_process_exit;
2239 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2240 struct bnxt_rx_ring_info *rxr;
2243 if (bp->flags & BNXT_FLAG_CHIP_P5)
2244 goto async_event_process_exit;
2246 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2247 BNXT_EVENT_RING_TYPE(data2), data1);
2248 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2249 goto async_event_process_exit;
2251 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2252 if (grp_idx == INVALID_HW_RING_ID) {
2253 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2255 goto async_event_process_exit;
2257 rxr = bp->bnapi[grp_idx]->rx_ring;
2258 bnxt_sched_reset(bp, rxr);
2259 goto async_event_process_exit;
2261 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2262 struct bnxt_fw_health *fw_health = bp->fw_health;
2264 netif_notice(bp, hw, bp->dev,
2265 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2268 fw_health->echo_req_data1 = data1;
2269 fw_health->echo_req_data2 = data2;
2270 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2273 goto async_event_process_exit;
2275 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2276 bnxt_ptp_pps_event(bp, data1, data2);
2277 goto async_event_process_exit;
2279 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2280 bnxt_event_error_report(bp, data1, data2);
2281 goto async_event_process_exit;
2283 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2284 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2286 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2287 goto async_event_process_exit;
2290 goto async_event_process_exit;
2292 bnxt_queue_sp_work(bp);
2293 async_event_process_exit:
2294 bnxt_ulp_async_events(bp, cmpl);
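/* bnxt_hwrm_handler() - dispatch HWRM completions found on the completion
 * ring: HWRM_DONE completes the pending request token, HWRM_FWD_REQ marks
 * the originating VF in vf_event_bmap and schedules the slow-path task to
 * forward the request, and HWRM_ASYNC_EVENT is handed to
 * bnxt_async_event_process().
 */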
2298 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2300 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2301 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2302 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2303 (struct hwrm_fwd_req_cmpl *)txcmp;
2305 switch (cmpl_type) {
2306 case CMPL_BASE_TYPE_HWRM_DONE:
2307 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2308 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2311 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2312 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2314 if ((vf_id < bp->pf.first_vf_id) ||
2315 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2316 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2321 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2322 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2323 bnxt_queue_sp_work(bp);
2326 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2327 bnxt_async_event_process(bp,
2328 (struct hwrm_async_event_cmpl *)txcmp);
2338 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2340 struct bnxt_napi *bnapi = dev_instance;
2341 struct bnxt *bp = bnapi->bp;
2342 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2343 u32 cons = RING_CMP(cpr->cp_raw_cons);
2346 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2347 napi_schedule(&bnapi->napi);
2351 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2353 u32 raw_cons = cpr->cp_raw_cons;
2354 u16 cons = RING_CMP(raw_cons);
2355 struct tx_cmp *txcmp;
2357 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2359 return TX_CMP_VALID(txcmp, raw_cons);
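/* bnxt_inta() - legacy (INTx) interrupt handler.  If no work is pending,
 * the legacy interrupt status register is checked so that interrupts
 * raised for other functions sharing the line can be ignored.  The ring
 * IRQ is then disabled and NAPI is scheduled unless interrupts are
 * globally masked via bp->intr_sem.
 */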
2362 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2364 struct bnxt_napi *bnapi = dev_instance;
2365 struct bnxt *bp = bnapi->bp;
2366 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2367 u32 cons = RING_CMP(cpr->cp_raw_cons);
2370 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2372 if (!bnxt_has_work(bp, cpr)) {
2373 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2374 /* return if erroneous interrupt */
2375 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2379 /* disable ring IRQ */
2380 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2382 /* Return here if interrupt is shared and is disabled. */
2383 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2386 napi_schedule(&bnapi->napi);
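/* __bnxt_poll_work() - core completion-ring processing shared by the NAPI
 * pollers.  Valid entries are consumed until the RX budget is exhausted:
 * TX completions are counted and deferred to tx_int, RX completions go
 * through bnxt_rx_pkt() (or bnxt_force_rx_discard() when polled without a
 * budget, i.e. from netpoll), and HWRM completions are passed to
 * bnxt_hwrm_handler().  Producer and event state is accumulated in bnapi
 * for __bnxt_poll_work_done() to flush.
 */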
2390 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2393 struct bnxt_napi *bnapi = cpr->bnapi;
2394 u32 raw_cons = cpr->cp_raw_cons;
2399 struct tx_cmp *txcmp;
2401 cpr->has_more_work = 0;
2402 cpr->had_work_done = 1;
2406 cons = RING_CMP(raw_cons);
2407 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2409 if (!TX_CMP_VALID(txcmp, raw_cons))
2412 /* The valid test of the entry must be done first before
2413 * reading any further.
2416 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2418 /* return full budget so NAPI will complete. */
2419 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2421 raw_cons = NEXT_RAW_CMP(raw_cons);
2423 cpr->has_more_work = 1;
2426 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2428 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2430 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2432 if (likely(rc >= 0))
2434 /* Increment rx_pkts when rc is -ENOMEM to count towards
2435 * the NAPI budget. Otherwise, we may potentially loop
2436 * here forever if we consistently cannot allocate
2439 else if (rc == -ENOMEM && budget)
2441 else if (rc == -EBUSY) /* partial completion */
2443 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2444 CMPL_BASE_TYPE_HWRM_DONE) ||
2445 (TX_CMP_TYPE(txcmp) ==
2446 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2447 (TX_CMP_TYPE(txcmp) ==
2448 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2449 bnxt_hwrm_handler(bp, txcmp);
2451 raw_cons = NEXT_RAW_CMP(raw_cons);
2453 if (rx_pkts && rx_pkts == budget) {
2454 cpr->has_more_work = 1;
2459 if (event & BNXT_REDIRECT_EVENT)
2462 if (event & BNXT_TX_EVENT) {
2463 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2464 u16 prod = txr->tx_prod;
2466 /* Sync BD data before updating doorbell */
2469 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2472 cpr->cp_raw_cons = raw_cons;
2473 bnapi->tx_pkts += tx_pkts;
2474 bnapi->events |= event;
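/* __bnxt_poll_work_done() - flush the work accumulated by
 * __bnxt_poll_work(): reap completed TX packets via the ring's tx_int
 * handler and write the RX (and aggregation) doorbells with the new
 * producer values, unless the ring is in the middle of a reset.
 */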
2478 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2480 if (bnapi->tx_pkts) {
2481 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2485 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2486 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2488 if (bnapi->events & BNXT_AGG_EVENT)
2489 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2490 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2495 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2498 struct bnxt_napi *bnapi = cpr->bnapi;
2501 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2503 /* ACK completion ring before freeing tx ring and producing new
2504 * buffers in rx/agg rings to prevent overflowing the completion
2507 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2509 __bnxt_poll_work_done(bp, bnapi);
2513 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2515 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2516 struct bnxt *bp = bnapi->bp;
2517 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2518 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2519 struct tx_cmp *txcmp;
2520 struct rx_cmp_ext *rxcmp1;
2521 u32 cp_cons, tmp_raw_cons;
2522 u32 raw_cons = cpr->cp_raw_cons;
2529 cp_cons = RING_CMP(raw_cons);
2530 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2532 if (!TX_CMP_VALID(txcmp, raw_cons))
2535 /* The valid test of the entry must be done first before
2536 * reading any further.
2539 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2540 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2541 cp_cons = RING_CMP(tmp_raw_cons);
2542 rxcmp1 = (struct rx_cmp_ext *)
2543 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2545 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2548 /* force an error to recycle the buffer */
2549 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2550 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2552 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2553 if (likely(rc == -EIO) && budget)
2555 else if (rc == -EBUSY) /* partial completion */
2557 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2558 CMPL_BASE_TYPE_HWRM_DONE)) {
2559 bnxt_hwrm_handler(bp, txcmp);
2562 "Invalid completion received on special ring\n");
2564 raw_cons = NEXT_RAW_CMP(raw_cons);
2566 if (rx_pkts == budget)
2570 cpr->cp_raw_cons = raw_cons;
2571 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2572 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2574 if (event & BNXT_AGG_EVENT)
2575 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2577 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2578 napi_complete_done(napi, rx_pkts);
2579 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
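/* bnxt_poll() - NAPI poll handler for the normal (non-P5) completion ring
 * layout.  Polling is aborted immediately when firmware is in a fatal
 * state.  Otherwise bnxt_poll_work() runs until the budget is consumed;
 * when the ring is empty the NAPI context is completed and the completion
 * ring is re-armed.  When DIM is enabled, the latest event/packet/byte
 * counters are fed to net_dim() for adaptive interrupt moderation.
 */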
2584 static int bnxt_poll(struct napi_struct *napi, int budget)
2586 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2587 struct bnxt *bp = bnapi->bp;
2588 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2591 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2592 napi_complete(napi);
2596 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2598 if (work_done >= budget) {
2600 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2604 if (!bnxt_has_work(bp, cpr)) {
2605 if (napi_complete_done(napi, work_done))
2606 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2610 if (bp->flags & BNXT_FLAG_DIM) {
2611 struct dim_sample dim_sample = {};
2613 dim_update_sample(cpr->event_ctr,
2617 net_dim(&cpr->dim, dim_sample);
2622 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2624 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2625 int i, work_done = 0;
2627 for (i = 0; i < 2; i++) {
2628 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2631 work_done += __bnxt_poll_work(bp, cpr2,
2632 budget - work_done);
2633 cpr->has_more_work |= cpr2->has_more_work;
2639 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2642 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2645 for (i = 0; i < 2; i++) {
2646 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2647 struct bnxt_db_info *db;
2649 if (cpr2 && cpr2->had_work_done) {
2651 bnxt_writeq(bp, db->db_key64 | dbr_type |
2652 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2653 cpr2->had_work_done = 0;
2656 __bnxt_poll_work_done(bp, bnapi);
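/* bnxt_poll_p5() - NAPI poll handler for P5 chips, where the MSI-X vector
 * is attached to a notification queue (NQ).  Each NQ entry that signals a
 * CQ notification selects one of the RX/TX completion rings hanging off
 * cpr->cp_ring_arr[], which is then serviced with __bnxt_poll_work();
 * other entries are treated as HWRM completions.  Doorbells for the
 * serviced completion rings and for the NQ itself are written on exit.
 */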
2659 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2661 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2662 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2663 u32 raw_cons = cpr->cp_raw_cons;
2664 struct bnxt *bp = bnapi->bp;
2665 struct nqe_cn *nqcmp;
2669 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2670 napi_complete(napi);
2673 if (cpr->has_more_work) {
2674 cpr->has_more_work = 0;
2675 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2678 cons = RING_CMP(raw_cons);
2679 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2681 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2682 if (cpr->has_more_work)
2685 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2686 cpr->cp_raw_cons = raw_cons;
2687 if (napi_complete_done(napi, work_done))
2688 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2693 /* The valid test of the entry must be done first before
2694 * reading any further.
2698 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2699 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2700 struct bnxt_cp_ring_info *cpr2;
2702 cpr2 = cpr->cp_ring_arr[idx];
2703 work_done += __bnxt_poll_work(bp, cpr2,
2704 budget - work_done);
2705 cpr->has_more_work |= cpr2->has_more_work;
2707 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2709 raw_cons = NEXT_RAW_CMP(raw_cons);
2711 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2712 if (raw_cons != cpr->cp_raw_cons) {
2713 cpr->cp_raw_cons = raw_cons;
2714 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
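/* bnxt_free_tx_skbs() - unmap and release every pending TX buffer on all
 * TX rings: XDP_REDIRECT frames are returned with xdp_return_frame(),
 * regular skbs have their head and fragment DMA mappings torn down before
 * the skb is freed, and the BQL state of each TX queue is reset.
 */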
2719 static void bnxt_free_tx_skbs(struct bnxt *bp)
2722 struct pci_dev *pdev = bp->pdev;
2727 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2728 for (i = 0; i < bp->tx_nr_rings; i++) {
2729 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2732 if (!txr->tx_buf_ring)
2735 for (j = 0; j < max_idx;) {
2736 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2737 struct sk_buff *skb;
2740 if (i < bp->tx_nr_rings_xdp &&
2741 tx_buf->action == XDP_REDIRECT) {
2742 dma_unmap_single(&pdev->dev,
2743 dma_unmap_addr(tx_buf, mapping),
2744 dma_unmap_len(tx_buf, len),
2746 xdp_return_frame(tx_buf->xdpf);
2748 tx_buf->xdpf = NULL;
2761 if (tx_buf->is_push) {
2767 dma_unmap_single(&pdev->dev,
2768 dma_unmap_addr(tx_buf, mapping),
2772 last = tx_buf->nr_frags;
2774 for (k = 0; k < last; k++, j++) {
2775 int ring_idx = j & bp->tx_ring_mask;
2776 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2778 tx_buf = &txr->tx_buf_ring[ring_idx];
2781 dma_unmap_addr(tx_buf, mapping),
2782 skb_frag_size(frag), DMA_TO_DEVICE);
2786 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
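/* bnxt_free_one_rx_ring_skbs() - release all receive buffers of one RX
 * ring: the TPA placeholder buffers, the regular RX data buffers
 * (page-pool pages in page mode, otherwise unmapped and freed), the
 * aggregation ring pages together with their bitmap bits, the cached
 * rx_page, and the TPA aggregation-id bitmap.
 */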
2790 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2792 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2793 struct pci_dev *pdev = bp->pdev;
2794 struct bnxt_tpa_idx_map *map;
2795 int i, max_idx, max_agg_idx;
2797 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2798 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2800 goto skip_rx_tpa_free;
2802 for (i = 0; i < bp->max_tpa; i++) {
2803 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2804 u8 *data = tpa_info->data;
2809 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2810 bp->rx_buf_use_size, bp->rx_dir,
2811 DMA_ATTR_WEAK_ORDERING);
2813 tpa_info->data = NULL;
2819 if (!rxr->rx_buf_ring)
2820 goto skip_rx_buf_free;
2822 for (i = 0; i < max_idx; i++) {
2823 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2824 dma_addr_t mapping = rx_buf->mapping;
2825 void *data = rx_buf->data;
2830 rx_buf->data = NULL;
2831 if (BNXT_RX_PAGE_MODE(bp)) {
2832 mapping -= bp->rx_dma_offset;
2833 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2835 DMA_ATTR_WEAK_ORDERING);
2836 page_pool_recycle_direct(rxr->page_pool, data);
2838 dma_unmap_single_attrs(&pdev->dev, mapping,
2839 bp->rx_buf_use_size, bp->rx_dir,
2840 DMA_ATTR_WEAK_ORDERING);
2846 if (!rxr->rx_agg_ring)
2847 goto skip_rx_agg_free;
2849 for (i = 0; i < max_agg_idx; i++) {
2850 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2851 struct page *page = rx_agg_buf->page;
2856 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2857 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2858 DMA_ATTR_WEAK_ORDERING);
2860 rx_agg_buf->page = NULL;
2861 __clear_bit(i, rxr->rx_agg_bmap);
2868 __free_page(rxr->rx_page);
2869 rxr->rx_page = NULL;
2871 map = rxr->rx_tpa_idx_map;
2873 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2876 static void bnxt_free_rx_skbs(struct bnxt *bp)
2883 for (i = 0; i < bp->rx_nr_rings; i++)
2884 bnxt_free_one_rx_ring_skbs(bp, i);
2887 static void bnxt_free_skbs(struct bnxt *bp)
2889 bnxt_free_tx_skbs(bp);
2890 bnxt_free_rx_skbs(bp);
2893 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2895 u8 init_val = mem_init->init_val;
2896 u16 offset = mem_init->offset;
2902 if (offset == BNXT_MEM_INVALID_OFFSET) {
2903 memset(p, init_val, len);
2906 for (i = 0; i < len; i += mem_init->size)
2907 *(p2 + i + offset) = init_val;
2910 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2912 struct pci_dev *pdev = bp->pdev;
2918 for (i = 0; i < rmem->nr_pages; i++) {
2919 if (!rmem->pg_arr[i])
2922 dma_free_coherent(&pdev->dev, rmem->page_size,
2923 rmem->pg_arr[i], rmem->dma_arr[i]);
2925 rmem->pg_arr[i] = NULL;
2929 size_t pg_tbl_size = rmem->nr_pages * 8;
2931 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2932 pg_tbl_size = rmem->page_size;
2933 dma_free_coherent(&pdev->dev, pg_tbl_size,
2934 rmem->pg_tbl, rmem->pg_tbl_map);
2935 rmem->pg_tbl = NULL;
2937 if (rmem->vmem_size && *rmem->vmem) {
2943 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2945 struct pci_dev *pdev = bp->pdev;
2949 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2950 valid_bit = PTU_PTE_VALID;
2951 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2952 size_t pg_tbl_size = rmem->nr_pages * 8;
2954 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2955 pg_tbl_size = rmem->page_size;
2956 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2963 for (i = 0; i < rmem->nr_pages; i++) {
2964 u64 extra_bits = valid_bit;
2966 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2970 if (!rmem->pg_arr[i])
2974 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2976 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2977 if (i == rmem->nr_pages - 2 &&
2978 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2979 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2980 else if (i == rmem->nr_pages - 1 &&
2981 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2982 extra_bits |= PTU_PTE_LAST;
2984 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2988 if (rmem->vmem_size) {
2989 *rmem->vmem = vzalloc(rmem->vmem_size);
2996 static void bnxt_free_tpa_info(struct bnxt *bp)
3000 for (i = 0; i < bp->rx_nr_rings; i++) {
3001 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3003 kfree(rxr->rx_tpa_idx_map);
3004 rxr->rx_tpa_idx_map = NULL;
3006 kfree(rxr->rx_tpa[0].agg_arr);
3007 rxr->rx_tpa[0].agg_arr = NULL;
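/* bnxt_alloc_tpa_info() - allocate per-ring TPA (LRO/GRO_HW) state.
 * max_tpa is MAX_TPA, or on P5 chips the larger of the firmware-reported
 * max_tpa_v2 and MAX_TPA_P5.  P5 chips additionally need an aggregation
 * completion array per TPA slot plus an agg-id map; the arrays are carved
 * out of a single kcalloc'ed block anchored at rx_tpa[0].agg_arr.
 */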
3014 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3016 int i, j, total_aggs = 0;
3018 bp->max_tpa = MAX_TPA;
3019 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3020 if (!bp->max_tpa_v2)
3022 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3023 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3026 for (i = 0; i < bp->rx_nr_rings; i++) {
3027 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3028 struct rx_agg_cmp *agg;
3030 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3035 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3037 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3038 rxr->rx_tpa[0].agg_arr = agg;
3041 for (j = 1; j < bp->max_tpa; j++)
3042 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3043 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3045 if (!rxr->rx_tpa_idx_map)
3051 static void bnxt_free_rx_rings(struct bnxt *bp)
3058 bnxt_free_tpa_info(bp);
3059 for (i = 0; i < bp->rx_nr_rings; i++) {
3060 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3061 struct bnxt_ring_struct *ring;
3064 bpf_prog_put(rxr->xdp_prog);
3066 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3067 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3069 page_pool_destroy(rxr->page_pool);
3070 rxr->page_pool = NULL;
3072 kfree(rxr->rx_agg_bmap);
3073 rxr->rx_agg_bmap = NULL;
3075 ring = &rxr->rx_ring_struct;
3076 bnxt_free_ring(bp, &ring->ring_mem);
3078 ring = &rxr->rx_agg_ring_struct;
3079 bnxt_free_ring(bp, &ring->ring_mem);
3083 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3084 struct bnxt_rx_ring_info *rxr)
3086 struct page_pool_params pp = { 0 };
3088 pp.pool_size = bp->rx_ring_size;
3089 pp.nid = dev_to_node(&bp->pdev->dev);
3090 pp.dev = &bp->pdev->dev;
3091 pp.dma_dir = DMA_BIDIRECTIONAL;
3093 rxr->page_pool = page_pool_create(&pp);
3094 if (IS_ERR(rxr->page_pool)) {
3095 int err = PTR_ERR(rxr->page_pool);
3097 rxr->page_pool = NULL;
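/* bnxt_alloc_rx_rings() - allocate per-ring RX resources: a page pool,
 * XDP rxq registration with its page-pool memory model, the RX descriptor
 * ring memory, and (when aggregation rings are enabled) the aggregation
 * ring plus its software bitmap.  TPA state is allocated last when the
 * TPA flag is set.
 */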
3103 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3105 int i, rc = 0, agg_rings = 0;
3110 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3113 for (i = 0; i < bp->rx_nr_rings; i++) {
3114 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3115 struct bnxt_ring_struct *ring;
3117 ring = &rxr->rx_ring_struct;
3119 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3123 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3127 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3131 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3135 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3143 ring = &rxr->rx_agg_ring_struct;
3144 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3149 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3150 mem_size = rxr->rx_agg_bmap_size / 8;
3151 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3152 if (!rxr->rx_agg_bmap)
3156 if (bp->flags & BNXT_FLAG_TPA)
3157 rc = bnxt_alloc_tpa_info(bp);
3161 static void bnxt_free_tx_rings(struct bnxt *bp)
3164 struct pci_dev *pdev = bp->pdev;
3169 for (i = 0; i < bp->tx_nr_rings; i++) {
3170 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3171 struct bnxt_ring_struct *ring;
3174 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3175 txr->tx_push, txr->tx_push_mapping);
3176 txr->tx_push = NULL;
3179 ring = &txr->tx_ring_struct;
3181 bnxt_free_ring(bp, &ring->ring_mem);
3185 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3188 struct pci_dev *pdev = bp->pdev;
3190 bp->tx_push_size = 0;
3191 if (bp->tx_push_thresh) {
3194 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3195 bp->tx_push_thresh);
3197 if (push_size > 256) {
3199 bp->tx_push_thresh = 0;
3202 bp->tx_push_size = push_size;
3205 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3206 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3207 struct bnxt_ring_struct *ring;
3210 ring = &txr->tx_ring_struct;
3212 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3216 ring->grp_idx = txr->bnapi->index;
3217 if (bp->tx_push_size) {
3220 /* One pre-allocated DMA buffer to backup
3223 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3225 &txr->tx_push_mapping,
3231 mapping = txr->tx_push_mapping +
3232 sizeof(struct tx_push_bd);
3233 txr->data_mapping = cpu_to_le64(mapping);
3235 qidx = bp->tc_to_qidx[j];
3236 ring->queue_id = bp->q_info[qidx].queue_id;
3237 if (i < bp->tx_nr_rings_xdp)
3239 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3245 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3247 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3249 kfree(cpr->cp_desc_ring);
3250 cpr->cp_desc_ring = NULL;
3251 ring->ring_mem.pg_arr = NULL;
3252 kfree(cpr->cp_desc_mapping);
3253 cpr->cp_desc_mapping = NULL;
3254 ring->ring_mem.dma_arr = NULL;
3257 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3259 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3260 if (!cpr->cp_desc_ring)
3262 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3264 if (!cpr->cp_desc_mapping)
3269 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3275 for (i = 0; i < bp->cp_nr_rings; i++) {
3276 struct bnxt_napi *bnapi = bp->bnapi[i];
3280 bnxt_free_cp_arrays(&bnapi->cp_ring);
3284 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3286 int i, n = bp->cp_nr_pages;
3288 for (i = 0; i < bp->cp_nr_rings; i++) {
3289 struct bnxt_napi *bnapi = bp->bnapi[i];
3294 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3301 static void bnxt_free_cp_rings(struct bnxt *bp)
3308 for (i = 0; i < bp->cp_nr_rings; i++) {
3309 struct bnxt_napi *bnapi = bp->bnapi[i];
3310 struct bnxt_cp_ring_info *cpr;
3311 struct bnxt_ring_struct *ring;
3317 cpr = &bnapi->cp_ring;
3318 ring = &cpr->cp_ring_struct;
3320 bnxt_free_ring(bp, &ring->ring_mem);
3322 for (j = 0; j < 2; j++) {
3323 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3326 ring = &cpr2->cp_ring_struct;
3327 bnxt_free_ring(bp, &ring->ring_mem);
3328 bnxt_free_cp_arrays(cpr2);
3330 cpr->cp_ring_arr[j] = NULL;
3336 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3338 struct bnxt_ring_mem_info *rmem;
3339 struct bnxt_ring_struct *ring;
3340 struct bnxt_cp_ring_info *cpr;
3343 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3347 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3349 bnxt_free_cp_arrays(cpr);
3353 ring = &cpr->cp_ring_struct;
3354 rmem = &ring->ring_mem;
3355 rmem->nr_pages = bp->cp_nr_pages;
3356 rmem->page_size = HW_CMPD_RING_SIZE;
3357 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3358 rmem->dma_arr = cpr->cp_desc_mapping;
3359 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3360 rc = bnxt_alloc_ring(bp, rmem);
3362 bnxt_free_ring(bp, rmem);
3363 bnxt_free_cp_arrays(cpr);
3370 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3372 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3373 int i, rc, ulp_base_vec, ulp_msix;
3375 ulp_msix = bnxt_get_ulp_msix_num(bp);
3376 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3377 for (i = 0; i < bp->cp_nr_rings; i++) {
3378 struct bnxt_napi *bnapi = bp->bnapi[i];
3379 struct bnxt_cp_ring_info *cpr;
3380 struct bnxt_ring_struct *ring;
3385 cpr = &bnapi->cp_ring;
3387 ring = &cpr->cp_ring_struct;
3389 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3393 if (ulp_msix && i >= ulp_base_vec)
3394 ring->map_idx = i + ulp_msix;
3398 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3401 if (i < bp->rx_nr_rings) {
3402 struct bnxt_cp_ring_info *cpr2 =
3403 bnxt_alloc_cp_sub_ring(bp);
3405 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3408 cpr2->bnapi = bnapi;
3410 if ((sh && i < bp->tx_nr_rings) ||
3411 (!sh && i >= bp->rx_nr_rings)) {
3412 struct bnxt_cp_ring_info *cpr2 =
3413 bnxt_alloc_cp_sub_ring(bp);
3415 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3418 cpr2->bnapi = bnapi;
3424 static void bnxt_init_ring_struct(struct bnxt *bp)
3428 for (i = 0; i < bp->cp_nr_rings; i++) {
3429 struct bnxt_napi *bnapi = bp->bnapi[i];
3430 struct bnxt_ring_mem_info *rmem;
3431 struct bnxt_cp_ring_info *cpr;
3432 struct bnxt_rx_ring_info *rxr;
3433 struct bnxt_tx_ring_info *txr;
3434 struct bnxt_ring_struct *ring;
3439 cpr = &bnapi->cp_ring;
3440 ring = &cpr->cp_ring_struct;
3441 rmem = &ring->ring_mem;
3442 rmem->nr_pages = bp->cp_nr_pages;
3443 rmem->page_size = HW_CMPD_RING_SIZE;
3444 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3445 rmem->dma_arr = cpr->cp_desc_mapping;
3446 rmem->vmem_size = 0;
3448 rxr = bnapi->rx_ring;
3452 ring = &rxr->rx_ring_struct;
3453 rmem = &ring->ring_mem;
3454 rmem->nr_pages = bp->rx_nr_pages;
3455 rmem->page_size = HW_RXBD_RING_SIZE;
3456 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3457 rmem->dma_arr = rxr->rx_desc_mapping;
3458 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3459 rmem->vmem = (void **)&rxr->rx_buf_ring;
3461 ring = &rxr->rx_agg_ring_struct;
3462 rmem = &ring->ring_mem;
3463 rmem->nr_pages = bp->rx_agg_nr_pages;
3464 rmem->page_size = HW_RXBD_RING_SIZE;
3465 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3466 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3467 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3468 rmem->vmem = (void **)&rxr->rx_agg_ring;
3471 txr = bnapi->tx_ring;
3475 ring = &txr->tx_ring_struct;
3476 rmem = &ring->ring_mem;
3477 rmem->nr_pages = bp->tx_nr_pages;
3478 rmem->page_size = HW_RXBD_RING_SIZE;
3479 rmem->pg_arr = (void **)txr->tx_desc_ring;
3480 rmem->dma_arr = txr->tx_desc_mapping;
3481 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3482 rmem->vmem = (void **)&txr->tx_buf_ring;
3486 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3490 struct rx_bd **rx_buf_ring;
3492 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3493 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3497 rxbd = rx_buf_ring[i];
3501 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3502 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3503 rxbd->rx_bd_opaque = prod;
3508 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3510 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3511 struct net_device *dev = bp->dev;
3515 prod = rxr->rx_prod;
3516 for (i = 0; i < bp->rx_ring_size; i++) {
3517 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3518 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3519 ring_nr, i, bp->rx_ring_size);
3522 prod = NEXT_RX(prod);
3524 rxr->rx_prod = prod;
3526 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3529 prod = rxr->rx_agg_prod;
3530 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3531 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3532 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3533 ring_nr, i, bp->rx_agg_ring_size);
3536 prod = NEXT_RX_AGG(prod);
3538 rxr->rx_agg_prod = prod;
3544 for (i = 0; i < bp->max_tpa; i++) {
3545 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3549 rxr->rx_tpa[i].data = data;
3550 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3551 rxr->rx_tpa[i].mapping = mapping;
3557 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3559 struct bnxt_rx_ring_info *rxr;
3560 struct bnxt_ring_struct *ring;
3563 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3564 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3566 if (NET_IP_ALIGN == 2)
3567 type |= RX_BD_FLAGS_SOP;
3569 rxr = &bp->rx_ring[ring_nr];
3570 ring = &rxr->rx_ring_struct;
3571 bnxt_init_rxbd_pages(ring, type);
3573 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3574 bpf_prog_add(bp->xdp_prog, 1);
3575 rxr->xdp_prog = bp->xdp_prog;
3577 ring->fw_ring_id = INVALID_HW_RING_ID;
3579 ring = &rxr->rx_agg_ring_struct;
3580 ring->fw_ring_id = INVALID_HW_RING_ID;
3582 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3583 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3584 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3586 bnxt_init_rxbd_pages(ring, type);
3589 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3592 static void bnxt_init_cp_rings(struct bnxt *bp)
3596 for (i = 0; i < bp->cp_nr_rings; i++) {
3597 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3598 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3600 ring->fw_ring_id = INVALID_HW_RING_ID;
3601 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3602 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3603 for (j = 0; j < 2; j++) {
3604 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3609 ring = &cpr2->cp_ring_struct;
3610 ring->fw_ring_id = INVALID_HW_RING_ID;
3611 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3612 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3617 static int bnxt_init_rx_rings(struct bnxt *bp)
3621 if (BNXT_RX_PAGE_MODE(bp)) {
3622 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3623 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3625 bp->rx_offset = BNXT_RX_OFFSET;
3626 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3629 for (i = 0; i < bp->rx_nr_rings; i++) {
3630 rc = bnxt_init_one_rx_ring(bp, i);
3638 static int bnxt_init_tx_rings(struct bnxt *bp)
3642 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3643 BNXT_MIN_TX_DESC_CNT);
3645 for (i = 0; i < bp->tx_nr_rings; i++) {
3646 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3647 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3649 ring->fw_ring_id = INVALID_HW_RING_ID;
3655 static void bnxt_free_ring_grps(struct bnxt *bp)
3657 kfree(bp->grp_info);
3658 bp->grp_info = NULL;
3661 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3666 bp->grp_info = kcalloc(bp->cp_nr_rings,
3667 sizeof(struct bnxt_ring_grp_info),
3672 for (i = 0; i < bp->cp_nr_rings; i++) {
3674 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3675 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3676 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3677 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3678 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3683 static void bnxt_free_vnics(struct bnxt *bp)
3685 kfree(bp->vnic_info);
3686 bp->vnic_info = NULL;
3690 static int bnxt_alloc_vnics(struct bnxt *bp)
3694 #ifdef CONFIG_RFS_ACCEL
3695 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3696 num_vnics += bp->rx_nr_rings;
3699 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3702 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3707 bp->nr_vnics = num_vnics;
3711 static void bnxt_init_vnics(struct bnxt *bp)
3715 for (i = 0; i < bp->nr_vnics; i++) {
3716 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3719 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3720 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3721 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3723 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3725 if (bp->vnic_info[i].rss_hash_key) {
3727 prandom_bytes(vnic->rss_hash_key,
3730 memcpy(vnic->rss_hash_key,
3731 bp->vnic_info[0].rss_hash_key,
3737 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3741 pages = ring_size / desc_per_pg;
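/* The loop below bumps 'pages' up to the next power of two, so that the
 * derived ring masks (nr_pages * DESC_CNT - 1) work as simple bit masks.
 * With purely illustrative numbers: a ring_size of 3000 and 128
 * descriptors per page gives 23 pages here, which the loop raises to 32.
 */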
3748 while (pages & (pages - 1))
3754 void bnxt_set_tpa_flags(struct bnxt *bp)
3756 bp->flags &= ~BNXT_FLAG_TPA;
3757 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3759 if (bp->dev->features & NETIF_F_LRO)
3760 bp->flags |= BNXT_FLAG_LRO;
3761 else if (bp->dev->features & NETIF_F_GRO_HW)
3762 bp->flags |= BNXT_FLAG_GRO;
3765 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3768 void bnxt_set_ring_params(struct bnxt *bp)
3770 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3771 u32 agg_factor = 0, agg_ring_size = 0;
3773 /* 8 for CRC and VLAN */
3774 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3776 rx_space = rx_size + NET_SKB_PAD +
3777 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3779 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3780 ring_size = bp->rx_ring_size;
3781 bp->rx_agg_ring_size = 0;
3782 bp->rx_agg_nr_pages = 0;
3784 if (bp->flags & BNXT_FLAG_TPA)
3785 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3787 bp->flags &= ~BNXT_FLAG_JUMBO;
3788 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3791 bp->flags |= BNXT_FLAG_JUMBO;
3792 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3793 if (jumbo_factor > agg_factor)
3794 agg_factor = jumbo_factor;
3797 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3798 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3799 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3800 bp->rx_ring_size, ring_size);
3801 bp->rx_ring_size = ring_size;
3803 agg_ring_size = ring_size * agg_factor;
3805 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3807 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3808 u32 tmp = agg_ring_size;
3810 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3811 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3812 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3813 tmp, agg_ring_size);
3815 bp->rx_agg_ring_size = agg_ring_size;
3816 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3817 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3818 rx_space = rx_size + NET_SKB_PAD +
3819 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3822 bp->rx_buf_use_size = rx_size;
3823 bp->rx_buf_size = rx_space;
3825 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3826 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3828 ring_size = bp->tx_ring_size;
3829 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3830 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3832 max_rx_cmpl = bp->rx_ring_size;
3833 /* MAX TPA needs to be added because TPA_START completions are
3834 * immediately recycled, so the TPA completions are not bound by
3837 if (bp->flags & BNXT_FLAG_TPA)
3838 max_rx_cmpl += bp->max_tpa;
3839 /* RX and TPA completions are 32-byte, all others are 16-byte */
3840 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3841 bp->cp_ring_size = ring_size;
3843 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3844 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3845 bp->cp_nr_pages = MAX_CP_PAGES;
3846 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3847 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3848 ring_size, bp->cp_ring_size);
3850 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3851 bp->cp_ring_mask = bp->cp_bit - 1;
3854 /* Changing allocation mode of RX rings.
3855 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3857 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3860 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3863 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3864 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3865 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3866 bp->rx_dir = DMA_BIDIRECTIONAL;
3867 bp->rx_skb_func = bnxt_rx_page_skb;
3868 /* Disable LRO or GRO_HW */
3869 netdev_update_features(bp->dev);
3871 bp->dev->max_mtu = bp->max_mtu;
3872 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3873 bp->rx_dir = DMA_FROM_DEVICE;
3874 bp->rx_skb_func = bnxt_rx_skb;
3879 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3882 struct bnxt_vnic_info *vnic;
3883 struct pci_dev *pdev = bp->pdev;
3888 for (i = 0; i < bp->nr_vnics; i++) {
3889 vnic = &bp->vnic_info[i];
3891 kfree(vnic->fw_grp_ids);
3892 vnic->fw_grp_ids = NULL;
3894 kfree(vnic->uc_list);
3895 vnic->uc_list = NULL;
3897 if (vnic->mc_list) {
3898 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3899 vnic->mc_list, vnic->mc_list_mapping);
3900 vnic->mc_list = NULL;
3903 if (vnic->rss_table) {
3904 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3906 vnic->rss_table_dma_addr);
3907 vnic->rss_table = NULL;
3910 vnic->rss_hash_key = NULL;
3915 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3917 int i, rc = 0, size;
3918 struct bnxt_vnic_info *vnic;
3919 struct pci_dev *pdev = bp->pdev;
3922 for (i = 0; i < bp->nr_vnics; i++) {
3923 vnic = &bp->vnic_info[i];
3925 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3926 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3929 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3930 if (!vnic->uc_list) {
3937 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3938 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3940 dma_alloc_coherent(&pdev->dev,
3942 &vnic->mc_list_mapping,
3944 if (!vnic->mc_list) {
3950 if (bp->flags & BNXT_FLAG_CHIP_P5)
3951 goto vnic_skip_grps;
3953 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3954 max_rings = bp->rx_nr_rings;
3958 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3959 if (!vnic->fw_grp_ids) {
3964 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3965 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3968 /* Allocate rss table and hash key */
3969 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3970 if (bp->flags & BNXT_FLAG_CHIP_P5)
3971 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3973 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3974 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3975 vnic->rss_table_size,
3976 &vnic->rss_table_dma_addr,
3978 if (!vnic->rss_table) {
3983 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3984 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3992 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3994 struct bnxt_hwrm_wait_token *token;
3996 dma_pool_destroy(bp->hwrm_dma_pool);
3997 bp->hwrm_dma_pool = NULL;
4000 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4001 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4005 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4007 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4009 BNXT_HWRM_DMA_ALIGN, 0);
4010 if (!bp->hwrm_dma_pool)
4013 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4018 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4020 kfree(stats->hw_masks);
4021 stats->hw_masks = NULL;
4022 kfree(stats->sw_stats);
4023 stats->sw_stats = NULL;
4024 if (stats->hw_stats) {
4025 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4026 stats->hw_stats_map);
4027 stats->hw_stats = NULL;
4031 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4034 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4035 &stats->hw_stats_map, GFP_KERNEL);
4036 if (!stats->hw_stats)
4039 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4040 if (!stats->sw_stats)
4044 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4045 if (!stats->hw_masks)
4051 bnxt_free_stats_mem(bp, stats);
4055 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4059 for (i = 0; i < count; i++)
4063 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4067 for (i = 0; i < count; i++)
4068 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4071 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4072 struct bnxt_stats_mem *stats)
4074 struct hwrm_func_qstats_ext_output *resp;
4075 struct hwrm_func_qstats_ext_input *req;
4079 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4080 !(bp->flags & BNXT_FLAG_CHIP_P5))
4083 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4087 req->fid = cpu_to_le16(0xffff);
4088 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4090 resp = hwrm_req_hold(bp, req);
4091 rc = hwrm_req_send(bp, req);
4093 hw_masks = &resp->rx_ucast_pkts;
4094 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4096 hwrm_req_drop(bp, req);
4100 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4101 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
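/* bnxt_init_stats() - learn the width of the hardware counters so that
 * software accumulation can handle counter wrap.  When firmware supports
 * it, counter masks are queried with the *_qstats calls using the
 * COUNTER_MASK flag; otherwise defaults are filled in (48-bit ring
 * counters on P5 chips, 40-bit port counters).
 */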
4103 static void bnxt_init_stats(struct bnxt *bp)
4105 struct bnxt_napi *bnapi = bp->bnapi[0];
4106 struct bnxt_cp_ring_info *cpr;
4107 struct bnxt_stats_mem *stats;
4108 __le64 *rx_stats, *tx_stats;
4109 int rc, rx_count, tx_count;
4110 u64 *rx_masks, *tx_masks;
4114 cpr = &bnapi->cp_ring;
4115 stats = &cpr->stats;
4116 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4118 if (bp->flags & BNXT_FLAG_CHIP_P5)
4119 mask = (1ULL << 48) - 1;
4122 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4124 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4125 stats = &bp->port_stats;
4126 rx_stats = stats->hw_stats;
4127 rx_masks = stats->hw_masks;
4128 rx_count = sizeof(struct rx_port_stats) / 8;
4129 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4130 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4131 tx_count = sizeof(struct tx_port_stats) / 8;
4133 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4134 rc = bnxt_hwrm_port_qstats(bp, flags);
4136 mask = (1ULL << 40) - 1;
4138 bnxt_fill_masks(rx_masks, mask, rx_count);
4139 bnxt_fill_masks(tx_masks, mask, tx_count);
4141 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4142 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4143 bnxt_hwrm_port_qstats(bp, 0);
4146 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4147 stats = &bp->rx_port_stats_ext;
4148 rx_stats = stats->hw_stats;
4149 rx_masks = stats->hw_masks;
4150 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4151 stats = &bp->tx_port_stats_ext;
4152 tx_stats = stats->hw_stats;
4153 tx_masks = stats->hw_masks;
4154 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4156 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4157 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4159 mask = (1ULL << 40) - 1;
4161 bnxt_fill_masks(rx_masks, mask, rx_count);
4163 bnxt_fill_masks(tx_masks, mask, tx_count);
4165 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4167 bnxt_copy_hw_masks(tx_masks, tx_stats,
4169 bnxt_hwrm_port_qstats_ext(bp, 0);
4174 static void bnxt_free_port_stats(struct bnxt *bp)
4176 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4177 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4179 bnxt_free_stats_mem(bp, &bp->port_stats);
4180 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4181 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4184 static void bnxt_free_ring_stats(struct bnxt *bp)
4191 for (i = 0; i < bp->cp_nr_rings; i++) {
4192 struct bnxt_napi *bnapi = bp->bnapi[i];
4193 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4195 bnxt_free_stats_mem(bp, &cpr->stats);
4199 static int bnxt_alloc_stats(struct bnxt *bp)
4204 size = bp->hw_ring_stats_size;
4206 for (i = 0; i < bp->cp_nr_rings; i++) {
4207 struct bnxt_napi *bnapi = bp->bnapi[i];
4208 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4210 cpr->stats.len = size;
4211 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4215 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4218 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4221 if (bp->port_stats.hw_stats)
4222 goto alloc_ext_stats;
4224 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4225 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4229 bp->flags |= BNXT_FLAG_PORT_STATS;
4232 /* Display extended statistics only if FW supports it */
4233 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4234 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4237 if (bp->rx_port_stats_ext.hw_stats)
4238 goto alloc_tx_ext_stats;
4240 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4241 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4242 /* Extended stats are optional */
4247 if (bp->tx_port_stats_ext.hw_stats)
4250 if (bp->hwrm_spec_code >= 0x10902 ||
4251 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4252 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4253 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4254 /* Extended stats are optional */
4258 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4262 static void bnxt_clear_ring_indices(struct bnxt *bp)
4269 for (i = 0; i < bp->cp_nr_rings; i++) {
4270 struct bnxt_napi *bnapi = bp->bnapi[i];
4271 struct bnxt_cp_ring_info *cpr;
4272 struct bnxt_rx_ring_info *rxr;
4273 struct bnxt_tx_ring_info *txr;
4278 cpr = &bnapi->cp_ring;
4279 cpr->cp_raw_cons = 0;
4281 txr = bnapi->tx_ring;
4287 rxr = bnapi->rx_ring;
4290 rxr->rx_agg_prod = 0;
4291 rxr->rx_sw_agg_prod = 0;
4292 rxr->rx_next_cons = 0;
4297 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4299 #ifdef CONFIG_RFS_ACCEL
4302 /* We are under rtnl_lock and all our NAPIs have been disabled, so it
4303 * is safe to delete the hash table.
4305 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4306 struct hlist_head *head;
4307 struct hlist_node *tmp;
4308 struct bnxt_ntuple_filter *fltr;
4310 head = &bp->ntp_fltr_hash_tbl[i];
4311 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4312 hlist_del(&fltr->hash);
4317 kfree(bp->ntp_fltr_bmap);
4318 bp->ntp_fltr_bmap = NULL;
4320 bp->ntp_fltr_count = 0;
4324 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4326 #ifdef CONFIG_RFS_ACCEL
4329 if (!(bp->flags & BNXT_FLAG_RFS))
4332 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4333 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4335 bp->ntp_fltr_count = 0;
4336 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4340 if (!bp->ntp_fltr_bmap)
4349 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4351 bnxt_free_vnic_attributes(bp);
4352 bnxt_free_tx_rings(bp);
4353 bnxt_free_rx_rings(bp);
4354 bnxt_free_cp_rings(bp);
4355 bnxt_free_all_cp_arrays(bp);
4356 bnxt_free_ntp_fltrs(bp, irq_re_init);
4358 bnxt_free_ring_stats(bp);
4359 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4360 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4361 bnxt_free_port_stats(bp);
4362 bnxt_free_ring_grps(bp);
4363 bnxt_free_vnics(bp);
4364 kfree(bp->tx_ring_map);
4365 bp->tx_ring_map = NULL;
4373 bnxt_clear_ring_indices(bp);
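/* bnxt_alloc_mem() - allocate all per-device software state in one pass:
 * the bnapi block (pointer array plus structures), the RX and TX ring
 * info arrays with their bnapi cross-links and tx_int handlers (XDP rings
 * use bnxt_tx_int_xdp), statistics memory, ntuple filter tables, VNICs,
 * completion/RX/TX ring memory, and finally the default VNIC attributes.
 * Any failure unwinds through bnxt_free_mem().
 */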
4377 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4379 int i, j, rc, size, arr_size;
4383 /* Allocate bnapi mem pointer array and mem block for
4386 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4388 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4389 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4395 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4396 bp->bnapi[i] = bnapi;
4397 bp->bnapi[i]->index = i;
4398 bp->bnapi[i]->bp = bp;
4399 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4400 struct bnxt_cp_ring_info *cpr =
4401 &bp->bnapi[i]->cp_ring;
4403 cpr->cp_ring_struct.ring_mem.flags =
4404 BNXT_RMEM_RING_PTE_FLAG;
4408 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4409 sizeof(struct bnxt_rx_ring_info),
4414 for (i = 0; i < bp->rx_nr_rings; i++) {
4415 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4417 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4418 rxr->rx_ring_struct.ring_mem.flags =
4419 BNXT_RMEM_RING_PTE_FLAG;
4420 rxr->rx_agg_ring_struct.ring_mem.flags =
4421 BNXT_RMEM_RING_PTE_FLAG;
4423 rxr->bnapi = bp->bnapi[i];
4424 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4427 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4428 sizeof(struct bnxt_tx_ring_info),
4433 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4436 if (!bp->tx_ring_map)
4439 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4442 j = bp->rx_nr_rings;
4444 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4445 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4447 if (bp->flags & BNXT_FLAG_CHIP_P5)
4448 txr->tx_ring_struct.ring_mem.flags =
4449 BNXT_RMEM_RING_PTE_FLAG;
4450 txr->bnapi = bp->bnapi[j];
4451 bp->bnapi[j]->tx_ring = txr;
4452 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4453 if (i >= bp->tx_nr_rings_xdp) {
4454 txr->txq_index = i - bp->tx_nr_rings_xdp;
4455 bp->bnapi[j]->tx_int = bnxt_tx_int;
4457 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4458 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4462 rc = bnxt_alloc_stats(bp);
4465 bnxt_init_stats(bp);
4467 rc = bnxt_alloc_ntp_fltrs(bp);
4471 rc = bnxt_alloc_vnics(bp);
4476 rc = bnxt_alloc_all_cp_arrays(bp);
4480 bnxt_init_ring_struct(bp);
4482 rc = bnxt_alloc_rx_rings(bp);
4486 rc = bnxt_alloc_tx_rings(bp);
4490 rc = bnxt_alloc_cp_rings(bp);
4494 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4495 BNXT_VNIC_UCAST_FLAG;
4496 rc = bnxt_alloc_vnic_attributes(bp);
4502 bnxt_free_mem(bp, true);
4506 static void bnxt_disable_int(struct bnxt *bp)
4513 for (i = 0; i < bp->cp_nr_rings; i++) {
4514 struct bnxt_napi *bnapi = bp->bnapi[i];
4515 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4516 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4518 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4519 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4523 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4525 struct bnxt_napi *bnapi = bp->bnapi[n];
4526 struct bnxt_cp_ring_info *cpr;
4528 cpr = &bnapi->cp_ring;
4529 return cpr->cp_ring_struct.map_idx;
4532 static void bnxt_disable_int_sync(struct bnxt *bp)
4539 atomic_inc(&bp->intr_sem);
4541 bnxt_disable_int(bp);
4542 for (i = 0; i < bp->cp_nr_rings; i++) {
4543 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4545 synchronize_irq(bp->irq_tbl[map_idx].vector);
4549 static void bnxt_enable_int(struct bnxt *bp)
4553 atomic_set(&bp->intr_sem, 0);
4554 for (i = 0; i < bp->cp_nr_rings; i++) {
4555 struct bnxt_napi *bnapi = bp->bnapi[i];
4556 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4558 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
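/* bnxt_hwrm_func_drv_rgtr() - register the driver with firmware.  The
 * request advertises OS type and driver version, capability flags such as
 * hot-reset and error-recovery support, the set of HWRM commands that VFs
 * may forward to the PF, and a 256-bit bitmap of async events the driver
 * wants to receive (optionally extended by the caller-supplied bmap).  On
 * success the DRV_REGISTERED state bit is set and the IF_CHANGE
 * capability is recorded if firmware reports it.
 */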
4562 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4565 DECLARE_BITMAP(async_events_bmap, 256);
4566 u32 *events = (u32 *)async_events_bmap;
4567 struct hwrm_func_drv_rgtr_output *resp;
4568 struct hwrm_func_drv_rgtr_input *req;
4572 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4576 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4577 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4578 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4580 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4581 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4582 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4583 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4584 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4585 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4586 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4587 req->flags = cpu_to_le32(flags);
4588 req->ver_maj_8b = DRV_VER_MAJ;
4589 req->ver_min_8b = DRV_VER_MIN;
4590 req->ver_upd_8b = DRV_VER_UPD;
4591 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4592 req->ver_min = cpu_to_le16(DRV_VER_MIN);
4593 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4599 memset(data, 0, sizeof(data));
4600 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4601 u16 cmd = bnxt_vf_req_snif[i];
4602 unsigned int bit, idx;
4606 data[idx] |= 1 << bit;
4609 for (i = 0; i < 8; i++)
4610 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4613 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4616 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4617 req->flags |= cpu_to_le32(
4618 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4620 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4621 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4622 u16 event_id = bnxt_async_events_arr[i];
4624 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4625 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4627 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4629 if (bmap && bmap_size) {
4630 for (i = 0; i < bmap_size; i++) {
4631 if (test_bit(i, bmap))
4632 __set_bit(i, async_events_bmap);
4635 for (i = 0; i < 8; i++)
4636 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4640 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4642 resp = hwrm_req_hold(bp, req);
4643 rc = hwrm_req_send(bp, req);
4645 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4647 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4648 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4650 hwrm_req_drop(bp, req);
4654 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4656 struct hwrm_func_drv_unrgtr_input *req;
4659 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4662 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4665 return hwrm_req_send(bp, req);
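/* bnxt_hwrm_tunnel_dst_port_free() - tell firmware to release a VXLAN or
 * GENEVE UDP destination port that was previously allocated.  The call is
 * skipped when no port of that type is currently programmed, and the
 * cached firmware port id is invalidated once the request is issued.
 */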
4668 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4670 struct hwrm_tunnel_dst_port_free_input *req;
4673 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4674 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4676 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4677 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4680 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4684 req->tunnel_type = tunnel_type;
4686 switch (tunnel_type) {
4687 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4688 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4690 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4692 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4693 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4695 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4701 rc = hwrm_req_send(bp, req);
4703 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4708 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4711 struct hwrm_tunnel_dst_port_alloc_output *resp;
4712 struct hwrm_tunnel_dst_port_alloc_input *req;
4715 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4719 req->tunnel_type = tunnel_type;
4720 req->tunnel_dst_port_val = port;
4722 resp = hwrm_req_hold(bp, req);
4723 rc = hwrm_req_send(bp, req);
4725 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4730 switch (tunnel_type) {
4731 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4732 bp->vxlan_port = port;
4733 bp->vxlan_fw_dst_port_id =
4734 le16_to_cpu(resp->tunnel_dst_port_id);
4736 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4737 bp->nge_port = port;
4738 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4745 hwrm_req_drop(bp, req);
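/* Program the L2 RX mask (unicast/multicast/broadcast/promiscuous bits in
 * vnic->rx_mask) and the multicast address table for the given VNIC.
 */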
4749 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4751 struct hwrm_cfa_l2_set_rx_mask_input *req;
4752 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4755 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4759 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4760 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4761 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4762 req->mask = cpu_to_le32(vnic->rx_mask);
4763 return hwrm_req_send_silent(bp, req);
4766 #ifdef CONFIG_RFS_ACCEL
4767 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4768 struct bnxt_ntuple_filter *fltr)
4770 struct hwrm_cfa_ntuple_filter_free_input *req;
4773 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4777 req->ntuple_filter_id = fltr->filter_id;
4778 return hwrm_req_send(bp, req);
4781 #define BNXT_NTP_FLTR_FLAGS \
4782 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4783 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4784 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4785 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4786 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4787 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4788 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4789 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4790 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4791 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4792 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4793 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4794 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4795 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4797 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4798 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
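/* Allocate an ntuple (5-tuple) RX flow steering filter in the firmware for
 * aRFS.  Newer firmware with the RFS_RING_TBL_IDX_V2 capability takes the
 * RX ring index directly as the destination; otherwise the filter points
 * at the per-ring VNIC.
 */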
4800 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4801 struct bnxt_ntuple_filter *fltr)
4803 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4804 struct hwrm_cfa_ntuple_filter_alloc_input *req;
4805 struct flow_keys *keys = &fltr->fkeys;
4806 struct bnxt_vnic_info *vnic;
4810 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4814 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4816 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4817 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4818 req->dst_id = cpu_to_le16(fltr->rxq);
4820 vnic = &bp->vnic_info[fltr->rxq + 1];
4821 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4823 req->flags = cpu_to_le32(flags);
4824 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4826 req->ethertype = htons(ETH_P_IP);
4827 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4828 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4829 req->ip_protocol = keys->basic.ip_proto;
4831 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4834 req->ethertype = htons(ETH_P_IPV6);
4836 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4837 *(struct in6_addr *)&req->src_ipaddr[0] =
4838 keys->addrs.v6addrs.src;
4839 *(struct in6_addr *)&req->dst_ipaddr[0] =
4840 keys->addrs.v6addrs.dst;
4841 for (i = 0; i < 4; i++) {
4842 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4843 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4846 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4847 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4848 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4849 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4851 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4852 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4854 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4857 req->src_port = keys->ports.src;
4858 req->src_port_mask = cpu_to_be16(0xffff);
4859 req->dst_port = keys->ports.dst;
4860 req->dst_port_mask = cpu_to_be16(0xffff);
4862 resp = hwrm_req_hold(bp, req);
4863 rc = hwrm_req_send(bp, req);
4865 fltr->filter_id = resp->ntuple_filter_id;
4866 hwrm_req_drop(bp, req);
4871 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4874 struct hwrm_cfa_l2_filter_alloc_output *resp;
4875 struct hwrm_cfa_l2_filter_alloc_input *req;
4878 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4882 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4883 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4885 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4886 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4888 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4889 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4890 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4891 memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4892 req->l2_addr_mask[0] = 0xff;
4893 req->l2_addr_mask[1] = 0xff;
4894 req->l2_addr_mask[2] = 0xff;
4895 req->l2_addr_mask[3] = 0xff;
4896 req->l2_addr_mask[4] = 0xff;
4897 req->l2_addr_mask[5] = 0xff;
4899 resp = hwrm_req_hold(bp, req);
4900 rc = hwrm_req_send(bp, req);
4902 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4904 hwrm_req_drop(bp, req);
4908 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4910 struct hwrm_cfa_l2_filter_free_input *req;
4911 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4914 /* Any associated ntuple filters will also be cleared by firmware. */
4915 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4918 hwrm_req_hold(bp, req);
4919 for (i = 0; i < num_of_vnics; i++) {
4920 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4922 for (j = 0; j < vnic->uc_filter_count; j++) {
4923 req->l2_filter_id = vnic->fw_l2_filter_id[j];
4925 rc = hwrm_req_send(bp, req);
4927 vnic->uc_filter_count = 0;
4929 hwrm_req_drop(bp, req);
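/* Configure TPA (hardware RX aggregation / GRO) on a VNIC.  The maximum
 * number of aggregation segments is derived from the MTU and the RX page
 * size; e.g. with a 1500 byte MTU (mss = 1460) and 4K RX pages, n = 2 and
 * nsegs = 2 * (MAX_SKB_FRAGS - 1).  On P5 chips, MAX_TPA_SEGS_P5 and the
 * firmware-reported bp->max_tpa are used instead of the computed values.
 */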
4933 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4935 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4936 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4937 struct hwrm_vnic_tpa_cfg_input *req;
4940 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4943 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4948 u16 mss = bp->dev->mtu - 40;
4949 u32 nsegs, n, segs = 0, flags;
4951 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4952 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4953 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4954 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4955 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4956 if (tpa_flags & BNXT_FLAG_GRO)
4957 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4959 req->flags = cpu_to_le32(flags);
4962 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4963 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4964 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4966 /* The number of segs is in log2 units, and the first packet is not
4967 * included as part of these units.
4969 if (mss <= BNXT_RX_PAGE_SIZE) {
4970 n = BNXT_RX_PAGE_SIZE / mss;
4971 nsegs = (MAX_SKB_FRAGS - 1) * n;
4973 n = mss / BNXT_RX_PAGE_SIZE;
4974 if (mss & (BNXT_RX_PAGE_SIZE - 1))
4976 nsegs = (MAX_SKB_FRAGS - n) / n;
4979 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4980 segs = MAX_TPA_SEGS_P5;
4981 max_aggs = bp->max_tpa;
4983 segs = ilog2(nsegs);
4985 req->max_agg_segs = cpu_to_le16(segs);
4986 req->max_aggs = cpu_to_le16(max_aggs);
4988 req->min_agg_len = cpu_to_le32(512);
4990 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4992 return hwrm_req_send(bp, req);
4995 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4997 struct bnxt_ring_grp_info *grp_info;
4999 grp_info = &bp->grp_info[ring->grp_idx];
5000 return grp_info->cp_fw_ring_id;
5003 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5005 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5006 struct bnxt_napi *bnapi = rxr->bnapi;
5007 struct bnxt_cp_ring_info *cpr;
5009 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5010 return cpr->cp_ring_struct.fw_ring_id;
5012 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5016 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5018 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5019 struct bnxt_napi *bnapi = txr->bnapi;
5020 struct bnxt_cp_ring_info *cpr;
5022 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5023 return cpr->cp_ring_struct.fw_ring_id;
5025 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
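/* Allocate the RSS indirection table.  P5 chips use the larger
 * BNXT_MAX_RSS_TABLE_ENTRIES_P5 table; older chips use HW_HASH_INDEX_SIZE
 * entries.
 */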
5029 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5033 if (bp->flags & BNXT_FLAG_CHIP_P5)
5034 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5036 entries = HW_HASH_INDEX_SIZE;
5038 bp->rss_indir_tbl_entries = entries;
5039 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5041 if (!bp->rss_indir_tbl)
5046 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5048 u16 max_rings, max_entries, pad, i;
5050 if (!bp->rx_nr_rings)
5053 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5054 max_rings = bp->rx_nr_rings - 1;
5056 max_rings = bp->rx_nr_rings;
5058 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5060 for (i = 0; i < max_entries; i++)
5061 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5063 pad = bp->rss_indir_tbl_entries - max_entries;
5065 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5068 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5070 u16 i, tbl_size, max_ring = 0;
5072 if (!bp->rss_indir_tbl)
5075 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5076 for (i = 0; i < tbl_size; i++)
5077 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
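/* Return the number of RSS contexts required for the given number of RX
 * rings: one per BNXT_RSS_TABLE_ENTRIES_P5 rings on P5 chips, while older
 * chips generally need a single context (Nitro A0 is special-cased).
 */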
5081 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5083 if (bp->flags & BNXT_FLAG_CHIP_P5)
5084 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5085 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5090 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5092 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5095 /* Fill the RSS indirection table with ring group ids */
5096 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5098 j = bp->rss_indir_tbl[i];
5099 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5103 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5104 struct bnxt_vnic_info *vnic)
5106 __le16 *ring_tbl = vnic->rss_table;
5107 struct bnxt_rx_ring_info *rxr;
5110 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5112 for (i = 0; i < tbl_size; i++) {
5115 j = bp->rss_indir_tbl[i];
5116 rxr = &bp->rx_ring[j];
5118 ring_id = rxr->rx_ring_struct.fw_ring_id;
5119 *ring_tbl++ = cpu_to_le16(ring_id);
5120 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5121 *ring_tbl++ = cpu_to_le16(ring_id);
5125 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5127 if (bp->flags & BNXT_FLAG_CHIP_P5)
5128 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5130 __bnxt_fill_hw_rss_tbl(bp, vnic);
5133 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5135 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5136 struct hwrm_vnic_rss_cfg_input *req;
5139 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5140 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5143 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5148 bnxt_fill_hw_rss_tbl(bp, vnic);
5149 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5150 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5151 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5152 req->hash_key_tbl_addr =
5153 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5155 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5156 return hwrm_req_send(bp, req);
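/* P5 variant of RSS configuration: the indirection table is spread across
 * multiple ring table pairs, so one HWRM_VNIC_RSS_CFG request is sent per
 * RSS context.
 */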
5159 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5161 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5162 struct hwrm_vnic_rss_cfg_input *req;
5163 dma_addr_t ring_tbl_map;
5167 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5171 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5173 return hwrm_req_send(bp, req);
5175 bnxt_fill_hw_rss_tbl(bp, vnic);
5176 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5177 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5178 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5179 ring_tbl_map = vnic->rss_table_dma_addr;
5180 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5182 hwrm_req_hold(bp, req);
5183 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5184 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5185 req->ring_table_pair_index = i;
5186 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5187 rc = hwrm_req_send(bp, req);
5193 hwrm_req_drop(bp, req);
5197 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5199 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5200 struct hwrm_vnic_plcmodes_cfg_input *req;
5203 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5207 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5208 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5209 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5211 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5212 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5213 /* thresholds not implemented in firmware yet */
5214 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5215 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5216 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5217 return hwrm_req_send(bp, req);
5220 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5223 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5225 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5228 req->rss_cos_lb_ctx_id =
5229 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5231 hwrm_req_send(bp, req);
5232 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5235 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5239 for (i = 0; i < bp->nr_vnics; i++) {
5240 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5242 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5243 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5244 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5247 bp->rsscos_nr_ctxs = 0;
5250 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5252 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5253 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5256 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5260 resp = hwrm_req_hold(bp, req);
5261 rc = hwrm_req_send(bp, req);
5263 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5264 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5265 hwrm_req_drop(bp, req);
5270 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5272 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5273 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5274 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
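/* Configure a VNIC: default ring group (or default RX/completion ring ids
 * on P5 chips), RSS/COS/LB rules, MRU and VLAN stripping mode.
 */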
5277 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5279 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5280 struct hwrm_vnic_cfg_input *req;
5281 unsigned int ring = 0, grp_idx;
5285 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5289 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5290 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5292 req->default_rx_ring_id =
5293 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5294 req->default_cmpl_ring_id =
5295 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5297 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5298 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5301 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5302 /* Only RSS is supported for now; TBD: COS & LB */
5303 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5304 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5305 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5306 VNIC_CFG_REQ_ENABLES_MRU);
5307 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5309 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5310 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5311 VNIC_CFG_REQ_ENABLES_MRU);
5312 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5314 req->rss_rule = cpu_to_le16(0xffff);
5317 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5318 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5319 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5320 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5322 req->cos_rule = cpu_to_le16(0xffff);
5325 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5327 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5329 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5330 ring = bp->rx_nr_rings - 1;
5332 grp_idx = bp->rx_ring[ring].bnapi->index;
5333 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5334 req->lb_rule = cpu_to_le16(0xffff);
5336 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5338 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5339 #ifdef CONFIG_BNXT_SRIOV
5341 def_vlan = bp->vf.vlan;
5343 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5344 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5345 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5346 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5348 return hwrm_req_send(bp, req);
5351 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5353 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5354 struct hwrm_vnic_free_input *req;
5356 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5360 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5362 hwrm_req_send(bp, req);
5363 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5367 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5371 for (i = 0; i < bp->nr_vnics; i++)
5372 bnxt_hwrm_vnic_free_one(bp, i);
5375 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5376 unsigned int start_rx_ring_idx,
5377 unsigned int nr_rings)
5379 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5380 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5381 struct hwrm_vnic_alloc_output *resp;
5382 struct hwrm_vnic_alloc_input *req;
5385 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5389 if (bp->flags & BNXT_FLAG_CHIP_P5)
5390 goto vnic_no_ring_grps;
5392 /* map ring groups to this vnic */
5393 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5394 grp_idx = bp->rx_ring[i].bnapi->index;
5395 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5396 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5400 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5404 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5405 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5407 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5409 resp = hwrm_req_hold(bp, req);
5410 rc = hwrm_req_send(bp, req);
5412 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5413 hwrm_req_drop(bp, req);
5417 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5419 struct hwrm_vnic_qcaps_output *resp;
5420 struct hwrm_vnic_qcaps_input *req;
5423 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5424 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5425 if (bp->hwrm_spec_code < 0x10600)
5428 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5432 resp = hwrm_req_hold(bp, req);
5433 rc = hwrm_req_send(bp, req);
5435 u32 flags = le32_to_cpu(resp->flags);
5437 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5438 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5439 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5441 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5442 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5444 /* Older P5 fw before EXT_HW_STATS support did not set
5445 * VLAN_STRIP_CAP properly.
5447 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5448 (BNXT_CHIP_P5_THOR(bp) &&
5449 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5450 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5451 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5452 if (bp->max_tpa_v2) {
5453 if (BNXT_CHIP_P5_THOR(bp))
5454 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5456 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5459 hwrm_req_drop(bp, req);
5463 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5465 struct hwrm_ring_grp_alloc_output *resp;
5466 struct hwrm_ring_grp_alloc_input *req;
5470 if (bp->flags & BNXT_FLAG_CHIP_P5)
5473 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5477 resp = hwrm_req_hold(bp, req);
5478 for (i = 0; i < bp->rx_nr_rings; i++) {
5479 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5481 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5482 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5483 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5484 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5486 rc = hwrm_req_send(bp, req);
5491 bp->grp_info[grp_idx].fw_grp_id =
5492 le32_to_cpu(resp->ring_group_id);
5494 hwrm_req_drop(bp, req);
5498 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5500 struct hwrm_ring_grp_free_input *req;
5503 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5506 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5509 hwrm_req_hold(bp, req);
5510 for (i = 0; i < bp->cp_nr_rings; i++) {
5511 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5513 req->ring_group_id =
5514 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5516 hwrm_req_send(bp, req);
5517 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5519 hwrm_req_drop(bp, req);
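/* Send one HWRM_RING_ALLOC request.  ring_type selects TX, RX, RX
 * aggregation, completion or NQ; the associated completion ring, stats
 * context and buffer sizes are filled in per type.  On success the
 * firmware ring id is saved in ring->fw_ring_id.
 */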
5522 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5523 struct bnxt_ring_struct *ring,
5524 u32 ring_type, u32 map_index)
5526 struct hwrm_ring_alloc_output *resp;
5527 struct hwrm_ring_alloc_input *req;
5528 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5529 struct bnxt_ring_grp_info *grp_info;
5533 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5538 if (rmem->nr_pages > 1) {
5539 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5540 /* Page size is in log2 units */
5541 req->page_size = BNXT_PAGE_SHIFT;
5542 req->page_tbl_depth = 1;
5544 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5547 /* Association of ring index with doorbell index and MSIX number */
5548 req->logical_id = cpu_to_le16(map_index);
5550 switch (ring_type) {
5551 case HWRM_RING_ALLOC_TX: {
5552 struct bnxt_tx_ring_info *txr;
5554 txr = container_of(ring, struct bnxt_tx_ring_info,
5556 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5557 /* Association of transmit ring with completion ring */
5558 grp_info = &bp->grp_info[ring->grp_idx];
5559 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5560 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5561 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5562 req->queue_id = cpu_to_le16(ring->queue_id);
5565 case HWRM_RING_ALLOC_RX:
5566 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5567 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5568 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5571 /* Association of rx ring with stats context */
5572 grp_info = &bp->grp_info[ring->grp_idx];
5573 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5574 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5575 req->enables |= cpu_to_le32(
5576 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5577 if (NET_IP_ALIGN == 2)
5578 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5579 req->flags = cpu_to_le16(flags);
5582 case HWRM_RING_ALLOC_AGG:
5583 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5584 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5585 /* Association of agg ring with rx ring */
5586 grp_info = &bp->grp_info[ring->grp_idx];
5587 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5588 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5589 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5590 req->enables |= cpu_to_le32(
5591 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5592 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5594 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5596 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5598 case HWRM_RING_ALLOC_CMPL:
5599 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5600 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5601 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5602 /* Association of cp ring with nq */
5603 grp_info = &bp->grp_info[map_index];
5604 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5605 req->cq_handle = cpu_to_le64(ring->handle);
5606 req->enables |= cpu_to_le32(
5607 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5608 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5609 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5612 case HWRM_RING_ALLOC_NQ:
5613 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5614 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5615 if (bp->flags & BNXT_FLAG_USING_MSIX)
5616 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5619 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5624 resp = hwrm_req_hold(bp, req);
5625 rc = hwrm_req_send(bp, req);
5626 err = le16_to_cpu(resp->error_code);
5627 ring_id = le16_to_cpu(resp->ring_id);
5628 hwrm_req_drop(bp, req);
5632 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5633 ring_type, rc, err);
5636 ring->fw_ring_id = ring_id;
5640 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5645 struct hwrm_func_cfg_input *req;
5647 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5651 req->fid = cpu_to_le16(0xffff);
5652 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5653 req->async_event_cr = cpu_to_le16(idx);
5654 return hwrm_req_send(bp, req);
5656 struct hwrm_func_vf_cfg_input *req;
5658 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5663 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5664 req->async_event_cr = cpu_to_le16(idx);
5665 return hwrm_req_send(bp, req);
5669 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5670 u32 map_idx, u32 xid)
5672 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5674 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5676 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5677 switch (ring_type) {
5678 case HWRM_RING_ALLOC_TX:
5679 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5681 case HWRM_RING_ALLOC_RX:
5682 case HWRM_RING_ALLOC_AGG:
5683 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5685 case HWRM_RING_ALLOC_CMPL:
5686 db->db_key64 = DBR_PATH_L2;
5688 case HWRM_RING_ALLOC_NQ:
5689 db->db_key64 = DBR_PATH_L2;
5692 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5694 db->doorbell = bp->bar1 + map_idx * 0x80;
5695 switch (ring_type) {
5696 case HWRM_RING_ALLOC_TX:
5697 db->db_key32 = DB_KEY_TX;
5699 case HWRM_RING_ALLOC_RX:
5700 case HWRM_RING_ALLOC_AGG:
5701 db->db_key32 = DB_KEY_RX;
5703 case HWRM_RING_ALLOC_CMPL:
5704 db->db_key32 = DB_KEY_CP;
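/* Allocate all hardware rings in dependency order: the NQ (P5) or
 * completion ring for each bnapi first, then TX rings, RX rings and, when
 * aggregation is enabled, the RX aggregation rings.  On P5 chips each TX
 * and RX ring additionally gets its own completion ring bound to the NQ.
 */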
5710 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5712 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5716 if (bp->flags & BNXT_FLAG_CHIP_P5)
5717 type = HWRM_RING_ALLOC_NQ;
5719 type = HWRM_RING_ALLOC_CMPL;
5720 for (i = 0; i < bp->cp_nr_rings; i++) {
5721 struct bnxt_napi *bnapi = bp->bnapi[i];
5722 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5723 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5724 u32 map_idx = ring->map_idx;
5725 unsigned int vector;
5727 vector = bp->irq_tbl[map_idx].vector;
5728 disable_irq_nosync(vector);
5729 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5734 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5735 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5737 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5740 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5742 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5746 type = HWRM_RING_ALLOC_TX;
5747 for (i = 0; i < bp->tx_nr_rings; i++) {
5748 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5749 struct bnxt_ring_struct *ring;
5752 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5753 struct bnxt_napi *bnapi = txr->bnapi;
5754 struct bnxt_cp_ring_info *cpr, *cpr2;
5755 u32 type2 = HWRM_RING_ALLOC_CMPL;
5757 cpr = &bnapi->cp_ring;
5758 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5759 ring = &cpr2->cp_ring_struct;
5760 ring->handle = BNXT_TX_HDL;
5761 map_idx = bnapi->index;
5762 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5765 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5767 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5769 ring = &txr->tx_ring_struct;
5771 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5774 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5777 type = HWRM_RING_ALLOC_RX;
5778 for (i = 0; i < bp->rx_nr_rings; i++) {
5779 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5780 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5781 struct bnxt_napi *bnapi = rxr->bnapi;
5782 u32 map_idx = bnapi->index;
5784 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5787 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5788 /* If we have agg rings, post agg buffers first. */
5790 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5791 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5792 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5793 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5794 u32 type2 = HWRM_RING_ALLOC_CMPL;
5795 struct bnxt_cp_ring_info *cpr2;
5797 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5798 ring = &cpr2->cp_ring_struct;
5799 ring->handle = BNXT_RX_HDL;
5800 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5803 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5805 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5810 type = HWRM_RING_ALLOC_AGG;
5811 for (i = 0; i < bp->rx_nr_rings; i++) {
5812 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5813 struct bnxt_ring_struct *ring =
5814 &rxr->rx_agg_ring_struct;
5815 u32 grp_idx = ring->grp_idx;
5816 u32 map_idx = grp_idx + bp->rx_nr_rings;
5818 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5822 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5824 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5825 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5826 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5833 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5834 struct bnxt_ring_struct *ring,
5835 u32 ring_type, int cmpl_ring_id)
5837 struct hwrm_ring_free_output *resp;
5838 struct hwrm_ring_free_input *req;
5842 if (BNXT_NO_FW_ACCESS(bp))
5845 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5849 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5850 req->ring_type = ring_type;
5851 req->ring_id = cpu_to_le16(ring->fw_ring_id);
5853 resp = hwrm_req_hold(bp, req);
5854 rc = hwrm_req_send(bp, req);
5855 error_code = le16_to_cpu(resp->error_code);
5856 hwrm_req_drop(bp, req);
5858 if (rc || error_code) {
5859 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5860 ring_type, rc, error_code);
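/* Free all hardware rings in roughly the reverse order of allocation: TX,
 * RX, RX aggregation, then the completion/NQ rings.  When close_path is
 * set, the corresponding completion ring id is passed in the request;
 * otherwise INVALID_HW_RING_ID is used.
 */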
5866 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5874 for (i = 0; i < bp->tx_nr_rings; i++) {
5875 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5876 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5878 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5879 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5881 hwrm_ring_free_send_msg(bp, ring,
5882 RING_FREE_REQ_RING_TYPE_TX,
5883 close_path ? cmpl_ring_id :
5884 INVALID_HW_RING_ID);
5885 ring->fw_ring_id = INVALID_HW_RING_ID;
5889 for (i = 0; i < bp->rx_nr_rings; i++) {
5890 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5891 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5892 u32 grp_idx = rxr->bnapi->index;
5894 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5895 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5897 hwrm_ring_free_send_msg(bp, ring,
5898 RING_FREE_REQ_RING_TYPE_RX,
5899 close_path ? cmpl_ring_id :
5900 INVALID_HW_RING_ID);
5901 ring->fw_ring_id = INVALID_HW_RING_ID;
5902 bp->grp_info[grp_idx].rx_fw_ring_id =
5907 if (bp->flags & BNXT_FLAG_CHIP_P5)
5908 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5910 type = RING_FREE_REQ_RING_TYPE_RX;
5911 for (i = 0; i < bp->rx_nr_rings; i++) {
5912 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5913 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5914 u32 grp_idx = rxr->bnapi->index;
5916 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5917 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5919 hwrm_ring_free_send_msg(bp, ring, type,
5920 close_path ? cmpl_ring_id :
5921 INVALID_HW_RING_ID);
5922 ring->fw_ring_id = INVALID_HW_RING_ID;
5923 bp->grp_info[grp_idx].agg_fw_ring_id =
5928 /* The completion rings are about to be freed. After that the
5929 * IRQ doorbell will not work anymore, so disable interrupts first. */
5932 bnxt_disable_int_sync(bp);
5934 if (bp->flags & BNXT_FLAG_CHIP_P5)
5935 type = RING_FREE_REQ_RING_TYPE_NQ;
5937 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5938 for (i = 0; i < bp->cp_nr_rings; i++) {
5939 struct bnxt_napi *bnapi = bp->bnapi[i];
5940 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5941 struct bnxt_ring_struct *ring;
5944 for (j = 0; j < 2; j++) {
5945 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5948 ring = &cpr2->cp_ring_struct;
5949 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5951 hwrm_ring_free_send_msg(bp, ring,
5952 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5953 INVALID_HW_RING_ID);
5954 ring->fw_ring_id = INVALID_HW_RING_ID;
5957 ring = &cpr->cp_ring_struct;
5958 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5959 hwrm_ring_free_send_msg(bp, ring, type,
5960 INVALID_HW_RING_ID);
5961 ring->fw_ring_id = INVALID_HW_RING_ID;
5962 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5967 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5970 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5972 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5973 struct hwrm_func_qcfg_output *resp;
5974 struct hwrm_func_qcfg_input *req;
5977 if (bp->hwrm_spec_code < 0x10601)
5980 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5984 req->fid = cpu_to_le16(0xffff);
5985 resp = hwrm_req_hold(bp, req);
5986 rc = hwrm_req_send(bp, req);
5988 hwrm_req_drop(bp, req);
5992 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5993 if (BNXT_NEW_RM(bp)) {
5996 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5997 hw_resc->resv_hw_ring_grps =
5998 le32_to_cpu(resp->alloc_hw_ring_grps);
5999 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6000 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6001 stats = le16_to_cpu(resp->alloc_stat_ctx);
6002 hw_resc->resv_irqs = cp;
6003 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6004 int rx = hw_resc->resv_rx_rings;
6005 int tx = hw_resc->resv_tx_rings;
6007 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6009 if (cp < (rx + tx)) {
6010 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6011 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6013 hw_resc->resv_rx_rings = rx;
6014 hw_resc->resv_tx_rings = tx;
6016 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6017 hw_resc->resv_hw_ring_grps = rx;
6019 hw_resc->resv_cp_rings = cp;
6020 hw_resc->resv_stat_ctxs = stats;
6022 hwrm_req_drop(bp, req);
6026 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6028 struct hwrm_func_qcfg_output *resp;
6029 struct hwrm_func_qcfg_input *req;
6032 if (bp->hwrm_spec_code < 0x10601)
6035 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6039 req->fid = cpu_to_le16(fid);
6040 resp = hwrm_req_hold(bp, req);
6041 rc = hwrm_req_send(bp, req);
6043 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6045 hwrm_req_drop(bp, req);
6049 static bool bnxt_rfs_supported(struct bnxt *bp);
6051 static struct hwrm_func_cfg_input *
6052 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6053 int ring_grps, int cp_rings, int stats, int vnics)
6055 struct hwrm_func_cfg_input *req;
6058 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6061 req->fid = cpu_to_le16(0xffff);
6062 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6063 req->num_tx_rings = cpu_to_le16(tx_rings);
6064 if (BNXT_NEW_RM(bp)) {
6065 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6066 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6067 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6068 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6069 enables |= tx_rings + ring_grps ?
6070 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6071 enables |= rx_rings ?
6072 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6074 enables |= cp_rings ?
6075 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6076 enables |= ring_grps ?
6077 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6078 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6080 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6082 req->num_rx_rings = cpu_to_le16(rx_rings);
6083 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6084 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6085 req->num_msix = cpu_to_le16(cp_rings);
6086 req->num_rsscos_ctxs =
6087 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6089 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6090 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6091 req->num_rsscos_ctxs = cpu_to_le16(1);
6092 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6093 bnxt_rfs_supported(bp))
6094 req->num_rsscos_ctxs =
6095 cpu_to_le16(ring_grps + 1);
6097 req->num_stat_ctxs = cpu_to_le16(stats);
6098 req->num_vnics = cpu_to_le16(vnics);
6100 req->enables = cpu_to_le32(enables);
6104 static struct hwrm_func_vf_cfg_input *
6105 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6106 int ring_grps, int cp_rings, int stats, int vnics)
6108 struct hwrm_func_vf_cfg_input *req;
6111 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6114 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6115 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6116 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6117 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6118 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6119 enables |= tx_rings + ring_grps ?
6120 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6122 enables |= cp_rings ?
6123 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6124 enables |= ring_grps ?
6125 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6127 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6128 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6130 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6131 req->num_tx_rings = cpu_to_le16(tx_rings);
6132 req->num_rx_rings = cpu_to_le16(rx_rings);
6133 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6134 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6135 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6137 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6138 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6139 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6141 req->num_stat_ctxs = cpu_to_le16(stats);
6142 req->num_vnics = cpu_to_le16(vnics);
6144 req->enables = cpu_to_le32(enables);
6149 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6150 int ring_grps, int cp_rings, int stats, int vnics)
6152 struct hwrm_func_cfg_input *req;
6155 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6156 cp_rings, stats, vnics);
6160 if (!req->enables) {
6161 hwrm_req_drop(bp, req);
6165 rc = hwrm_req_send(bp, req);
6169 if (bp->hwrm_spec_code < 0x10601)
6170 bp->hw_resc.resv_tx_rings = tx_rings;
6172 return bnxt_hwrm_get_rings(bp);
6176 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6177 int ring_grps, int cp_rings, int stats, int vnics)
6179 struct hwrm_func_vf_cfg_input *req;
6182 if (!BNXT_NEW_RM(bp)) {
6183 bp->hw_resc.resv_tx_rings = tx_rings;
6187 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6188 cp_rings, stats, vnics);
6192 rc = hwrm_req_send(bp, req);
6196 return bnxt_hwrm_get_rings(bp);
6199 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6200 int cp, int stat, int vnic)
6203 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6206 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6210 int bnxt_nq_rings_in_use(struct bnxt *bp)
6212 int cp = bp->cp_nr_rings;
6213 int ulp_msix, ulp_base;
6215 ulp_msix = bnxt_get_ulp_msix_num(bp);
6217 ulp_base = bnxt_get_ulp_msix_base(bp);
6219 if ((ulp_base + ulp_msix) > cp)
6220 cp = ulp_base + ulp_msix;
6225 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6229 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6230 return bnxt_nq_rings_in_use(bp);
6232 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6236 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6238 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6239 int cp = bp->cp_nr_rings;
6244 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6245 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6247 return cp + ulp_stat;
6250 /* Check if a default RSS map needs to be set up. This function is only
6251 * used on older firmware that does not require reserving RX rings.
6253 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6255 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6257 /* The RSS map is valid only for the RX ring count recorded in resv_rx_rings */
6258 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6259 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6260 if (!netif_is_rxfh_configured(bp->dev))
6261 bnxt_set_dflt_rss_indir_tbl(bp);
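/* Return true if the resources currently reserved with the firmware
 * (rings, ring groups, stat contexts, vnics, irqs) no longer match what
 * the driver needs, i.e. a new reservation is required.
 */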
6265 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6267 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6268 int cp = bnxt_cp_rings_in_use(bp);
6269 int nq = bnxt_nq_rings_in_use(bp);
6270 int rx = bp->rx_nr_rings, stat;
6271 int vnic = 1, grp = rx;
6273 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6274 bp->hwrm_spec_code >= 0x10601)
6277 /* Old firmware does not need RX ring reservations but we still
6278 * need to set up a default RSS map when needed. With new firmware
6279 * we go through RX ring reservations first and then set up the
6280 * RSS map for the successfully reserved RX rings when needed.
6282 if (!BNXT_NEW_RM(bp)) {
6283 bnxt_check_rss_tbl_no_rmgr(bp);
6286 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6288 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6290 stat = bnxt_get_func_stat_ctxs(bp);
6291 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6292 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6293 (hw_resc->resv_hw_ring_grps != grp &&
6294 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6296 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6297 hw_resc->resv_irqs != nq)
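/* Reserve rings and related resources with the firmware and trim the
 * driver's ring counts down to what was actually granted.  If the full
 * set of aggregation rings cannot be reserved, the driver may fall back
 * to non-aggregation mode and clear LRO before recomputing ring params.
 */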
6302 static int __bnxt_reserve_rings(struct bnxt *bp)
6304 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6305 int cp = bnxt_nq_rings_in_use(bp);
6306 int tx = bp->tx_nr_rings;
6307 int rx = bp->rx_nr_rings;
6308 int grp, rx_rings, rc;
6312 if (!bnxt_need_reserve_rings(bp))
6315 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6317 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6319 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6321 grp = bp->rx_nr_rings;
6322 stat = bnxt_get_func_stat_ctxs(bp);
6324 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6328 tx = hw_resc->resv_tx_rings;
6329 if (BNXT_NEW_RM(bp)) {
6330 rx = hw_resc->resv_rx_rings;
6331 cp = hw_resc->resv_irqs;
6332 grp = hw_resc->resv_hw_ring_grps;
6333 vnic = hw_resc->resv_vnics;
6334 stat = hw_resc->resv_stat_ctxs;
6338 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6342 if (netif_running(bp->dev))
6345 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6346 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6347 bp->dev->hw_features &= ~NETIF_F_LRO;
6348 bp->dev->features &= ~NETIF_F_LRO;
6349 bnxt_set_ring_params(bp);
6352 rx_rings = min_t(int, rx_rings, grp);
6353 cp = min_t(int, cp, bp->cp_nr_rings);
6354 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6355 stat -= bnxt_get_ulp_stat_ctxs(bp);
6356 cp = min_t(int, cp, stat);
6357 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6358 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6360 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6361 bp->tx_nr_rings = tx;
6363 /* If we cannot reserve all the RX rings, reset the RSS map only
6364 * if absolutely necessary
6366 if (rx_rings != bp->rx_nr_rings) {
6367 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6368 rx_rings, bp->rx_nr_rings);
6369 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6370 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6371 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6372 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6373 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6374 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6377 bp->rx_nr_rings = rx_rings;
6378 bp->cp_nr_rings = cp;
6380 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6383 if (!netif_is_rxfh_configured(bp->dev))
6384 bnxt_set_dflt_rss_indir_tbl(bp);
6389 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6390 int ring_grps, int cp_rings, int stats,
6393 struct hwrm_func_vf_cfg_input *req;
6396 if (!BNXT_NEW_RM(bp))
6399 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6400 cp_rings, stats, vnics);
6401 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6402 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6403 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6404 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6405 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6406 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6407 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6408 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6410 req->flags = cpu_to_le32(flags);
6411 return hwrm_req_send_silent(bp, req);
6414 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6415 int ring_grps, int cp_rings, int stats,
6418 struct hwrm_func_cfg_input *req;
6421 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6422 cp_rings, stats, vnics);
6423 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6424 if (BNXT_NEW_RM(bp)) {
6425 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6426 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6427 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6428 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6429 if (bp->flags & BNXT_FLAG_CHIP_P5)
6430 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6431 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6433 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6436 req->flags = cpu_to_le32(flags);
6437 return hwrm_req_send_silent(bp, req);
6440 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6441 int ring_grps, int cp_rings, int stats,
6444 if (bp->hwrm_spec_code < 0x10801)
6448 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6449 ring_grps, cp_rings, stats,
6452 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6453 cp_rings, stats, vnics);
6456 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6458 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6459 struct hwrm_ring_aggint_qcaps_output *resp;
6460 struct hwrm_ring_aggint_qcaps_input *req;
6463 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6464 coal_cap->num_cmpl_dma_aggr_max = 63;
6465 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6466 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6467 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6468 coal_cap->int_lat_tmr_min_max = 65535;
6469 coal_cap->int_lat_tmr_max_max = 65535;
6470 coal_cap->num_cmpl_aggr_int_max = 65535;
6471 coal_cap->timer_units = 80;
6473 if (bp->hwrm_spec_code < 0x10902)
6476 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6479 resp = hwrm_req_hold(bp, req);
6480 rc = hwrm_req_send_silent(bp, req);
6482 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6483 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6484 coal_cap->num_cmpl_dma_aggr_max =
6485 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6486 coal_cap->num_cmpl_dma_aggr_during_int_max =
6487 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6488 coal_cap->cmpl_aggr_dma_tmr_max =
6489 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6490 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6491 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6492 coal_cap->int_lat_tmr_min_max =
6493 le16_to_cpu(resp->int_lat_tmr_min_max);
6494 coal_cap->int_lat_tmr_max_max =
6495 le16_to_cpu(resp->int_lat_tmr_max_max);
6496 coal_cap->num_cmpl_aggr_int_max =
6497 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6498 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6500 hwrm_req_drop(bp, req);
6503 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6505 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6507 return usec * 1000 / coal_cap->timer_units;
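/* Translate the driver's coalescing settings (struct bnxt_coal) into a
 * RING_CMPL_RING_CFG_AGGINT_PARAMS request, clamping each value to the
 * limits reported by HWRM_RING_AGGINT_QCAPS.
 */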
6510 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6511 struct bnxt_coal *hw_coal,
6512 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6514 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6515 u32 cmpl_params = coal_cap->cmpl_params;
6516 u16 val, tmr, max, flags = 0;
6518 max = hw_coal->bufs_per_record * 128;
6519 if (hw_coal->budget)
6520 max = hw_coal->bufs_per_record * hw_coal->budget;
6521 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6523 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6524 req->num_cmpl_aggr_int = cpu_to_le16(val);
6526 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6527 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6529 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6530 coal_cap->num_cmpl_dma_aggr_during_int_max);
6531 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6533 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6534 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6535 req->int_lat_tmr_max = cpu_to_le16(tmr);
6537 /* min timer set to 1/2 of interrupt timer */
6538 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6540 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6541 req->int_lat_tmr_min = cpu_to_le16(val);
6542 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6545 /* buf timer set to 1/4 of interrupt timer */
6546 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6547 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6550 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6551 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6552 val = clamp_t(u16, tmr, 1,
6553 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6554 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6556 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6559 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6560 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6561 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6562 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6563 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6564 req->flags = cpu_to_le16(flags);
6565 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6568 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6569 struct bnxt_coal *hw_coal)
6571 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6572 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6573 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6574 u32 nq_params = coal_cap->nq_params;
6578 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6581 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6585 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6587 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6589 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6590 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6591 req->int_lat_tmr_min = cpu_to_le16(tmr);
6592 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6593 return hwrm_req_send(bp, req);
6596 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6598 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6599 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6600 struct bnxt_coal coal;
6603 /* Tick values are in microseconds.
6604 * 1 coal_buf x bufs_per_record = 1 completion record.
6606 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6608 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6609 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6611 if (!bnapi->rx_ring)
6614 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6618 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6620 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6622 return hwrm_req_send(bp, req_rx);
6625 int bnxt_hwrm_set_coal(struct bnxt *bp)
6627 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6631 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6635 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6637 hwrm_req_drop(bp, req_rx);
6641 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6642 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6644 hwrm_req_hold(bp, req_rx);
6645 hwrm_req_hold(bp, req_tx);
6646 for (i = 0; i < bp->cp_nr_rings; i++) {
6647 struct bnxt_napi *bnapi = bp->bnapi[i];
6648 struct bnxt_coal *hw_coal;
6652 if (!bnapi->rx_ring) {
6653 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6656 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6658 req->ring_id = cpu_to_le16(ring_id);
6660 rc = hwrm_req_send(bp, req);
6664 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6667 if (bnapi->rx_ring && bnapi->tx_ring) {
6669 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6670 req->ring_id = cpu_to_le16(ring_id);
6671 rc = hwrm_req_send(bp, req);
6676 hw_coal = &bp->rx_coal;
6678 hw_coal = &bp->tx_coal;
6679 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6681 hwrm_req_drop(bp, req_rx);
6682 hwrm_req_drop(bp, req_tx);
6686 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6688 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6689 struct hwrm_stat_ctx_free_input *req;
6695 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6698 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6700 if (BNXT_FW_MAJ(bp) <= 20) {
6701 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6702 hwrm_req_drop(bp, req);
6705 hwrm_req_hold(bp, req0);
6707 hwrm_req_hold(bp, req);
6708 for (i = 0; i < bp->cp_nr_rings; i++) {
6709 struct bnxt_napi *bnapi = bp->bnapi[i];
6710 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6712 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6713 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6715 req0->stat_ctx_id = req->stat_ctx_id;
6716 hwrm_req_send(bp, req0);
6718 hwrm_req_send(bp, req);
6720 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6723 hwrm_req_drop(bp, req);
6725 hwrm_req_drop(bp, req0);
6728 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6730 struct hwrm_stat_ctx_alloc_output *resp;
6731 struct hwrm_stat_ctx_alloc_input *req;
6734 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6737 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6741 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6742 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6744 resp = hwrm_req_hold(bp, req);
6745 for (i = 0; i < bp->cp_nr_rings; i++) {
6746 struct bnxt_napi *bnapi = bp->bnapi[i];
6747 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6749 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6751 rc = hwrm_req_send(bp, req);
6755 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6757 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6759 hwrm_req_drop(bp, req);
6763 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6765 struct hwrm_func_qcfg_output *resp;
6766 struct hwrm_func_qcfg_input *req;
6767 u32 min_db_offset = 0;
6771 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6775 req->fid = cpu_to_le16(0xffff);
6776 resp = hwrm_req_hold(bp, req);
6777 rc = hwrm_req_send(bp, req);
6779 goto func_qcfg_exit;
6781 #ifdef CONFIG_BNXT_SRIOV
6783 struct bnxt_vf_info *vf = &bp->vf;
6785 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6787 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6790 flags = le16_to_cpu(resp->flags);
6791 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6792 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6793 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6794 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6795 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6797 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6798 bp->flags |= BNXT_FLAG_MULTI_HOST;
6799 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6800 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6802 switch (resp->port_partition_type) {
6803 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6804 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6805 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6806 bp->port_partition_type = resp->port_partition_type;
6809 if (bp->hwrm_spec_code < 0x10707 ||
6810 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6811 bp->br_mode = BRIDGE_MODE_VEB;
6812 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6813 bp->br_mode = BRIDGE_MODE_VEPA;
6815 bp->br_mode = BRIDGE_MODE_UNDEF;
6817 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6819 bp->max_mtu = BNXT_MAX_MTU;
6822 goto func_qcfg_exit;
6824 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6826 min_db_offset = DB_PF_OFFSET_P5;
6828 min_db_offset = DB_VF_OFFSET_P5;
6830 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6832 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6833 bp->db_size <= min_db_offset)
6834 bp->db_size = pci_resource_len(bp->pdev, 2);
6837 hwrm_req_drop(bp, req);
6841 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6842 struct hwrm_func_backing_store_qcaps_output *resp)
6844 struct bnxt_mem_init *mem_init;
6850 init_val = resp->ctx_kind_initializer;
6851 init_mask = le16_to_cpu(resp->ctx_init_mask);
6852 offset = &resp->qp_init_offset;
6853 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6854 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6855 mem_init->init_val = init_val;
6856 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6859 if (i == BNXT_CTX_MEM_INIT_STAT)
6860 offset = &resp->stat_init_offset;
6861 if (init_mask & (1 << i))
6862 mem_init->offset = *offset * 4;
6864 mem_init->init_val = 0;
6866 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6867 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6868 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6869 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6870 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6871 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
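/* Query the firmware's backing-store (context memory) requirements:
 * entry sizes and min/max entry counts for the QP, SRQ, CQ, VNIC, STAT,
 * MRAV, TIM and TQM context types.  The results are cached in bp->ctx
 * and the per-type initializer values are recorded by
 * bnxt_init_ctx_initializer() above.
 */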
6874 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6876 struct hwrm_func_backing_store_qcaps_output *resp;
6877 struct hwrm_func_backing_store_qcaps_input *req;
6880 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6883 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6887 resp = hwrm_req_hold(bp, req);
6888 rc = hwrm_req_send_silent(bp, req);
6890 struct bnxt_ctx_pg_info *ctx_pg;
6891 struct bnxt_ctx_mem_info *ctx;
6894 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6899 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6900 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6901 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6902 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6903 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6904 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6905 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6906 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6907 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6908 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6909 ctx->vnic_max_vnic_entries =
6910 le16_to_cpu(resp->vnic_max_vnic_entries);
6911 ctx->vnic_max_ring_table_entries =
6912 le16_to_cpu(resp->vnic_max_ring_table_entries);
6913 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6914 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6915 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6916 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6917 ctx->tqm_min_entries_per_ring =
6918 le32_to_cpu(resp->tqm_min_entries_per_ring);
6919 ctx->tqm_max_entries_per_ring =
6920 le32_to_cpu(resp->tqm_max_entries_per_ring);
6921 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6922 if (!ctx->tqm_entries_multiple)
6923 ctx->tqm_entries_multiple = 1;
6924 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6925 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6926 ctx->mrav_num_entries_units =
6927 le16_to_cpu(resp->mrav_num_entries_units);
6928 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6929 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6931 bnxt_init_ctx_initializer(ctx, resp);
6933 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6934 if (!ctx->tqm_fp_rings_count)
6935 ctx->tqm_fp_rings_count = bp->max_q;
6936 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6937 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6939 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6940 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6946 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6947 ctx->tqm_mem[i] = ctx_pg;
6953 hwrm_req_drop(bp, req);
6957 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6960 if (!rmem->nr_pages)
6963 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
6964 if (rmem->depth >= 1) {
6965 if (rmem->depth == 2)
6969 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6971 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6975 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6976 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6977 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6978 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6979 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6980 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
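/* Program the firmware with the location of the host backing-store
 * memory.  Only the context types selected in @enables are configured;
 * each gets its entry count, entry size and page table level/directory
 * filled in via bnxt_hwrm_set_pg_attr().  The request is shortened to
 * the legacy length when the firmware cannot accept the extended size.
 */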
6982 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6984 struct hwrm_func_backing_store_cfg_input *req;
6985 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6986 struct bnxt_ctx_pg_info *ctx_pg;
6987 void **__req = (void **)&req;
6988 u32 req_len = sizeof(*req);
6989 __le32 *num_entries;
7000 if (req_len > bp->hwrm_max_ext_req_len)
7001 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7002 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7006 req->enables = cpu_to_le32(enables);
7007 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7008 ctx_pg = &ctx->qp_mem;
7009 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7010 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7011 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7012 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7013 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7014 &req->qpc_pg_size_qpc_lvl,
7015 &req->qpc_page_dir);
7017 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7018 ctx_pg = &ctx->srq_mem;
7019 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7020 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7021 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7022 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7023 &req->srq_pg_size_srq_lvl,
7024 &req->srq_page_dir);
7026 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7027 ctx_pg = &ctx->cq_mem;
7028 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7029 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7030 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7031 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7032 &req->cq_pg_size_cq_lvl,
7035 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7036 ctx_pg = &ctx->vnic_mem;
7037 req->vnic_num_vnic_entries =
7038 cpu_to_le16(ctx->vnic_max_vnic_entries);
7039 req->vnic_num_ring_table_entries =
7040 cpu_to_le16(ctx->vnic_max_ring_table_entries);
7041 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7042 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7043 &req->vnic_pg_size_vnic_lvl,
7044 &req->vnic_page_dir);
7046 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7047 ctx_pg = &ctx->stat_mem;
7048 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7049 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7050 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7051 &req->stat_pg_size_stat_lvl,
7052 &req->stat_page_dir);
7054 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7055 ctx_pg = &ctx->mrav_mem;
7056 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7057 if (ctx->mrav_num_entries_units)
7059 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7060 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7061 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7062 &req->mrav_pg_size_mrav_lvl,
7063 &req->mrav_page_dir);
7065 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7066 ctx_pg = &ctx->tim_mem;
7067 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7068 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7069 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7070 &req->tim_pg_size_tim_lvl,
7071 &req->tim_page_dir);
7073 for (i = 0, num_entries = &req->tqm_sp_num_entries,
7074 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7075 pg_dir = &req->tqm_sp_page_dir,
7076 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7077 i < BNXT_MAX_TQM_RINGS;
7078 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7079 if (!(enables & ena))
7082 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7083 ctx_pg = ctx->tqm_mem[i];
7084 *num_entries = cpu_to_le32(ctx_pg->entries);
7085 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7087 req->flags = cpu_to_le32(flags);
7088 return hwrm_req_send(bp, req);
7091 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7092 struct bnxt_ctx_pg_info *ctx_pg)
7094 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7096 rmem->page_size = BNXT_PAGE_SIZE;
7097 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7098 rmem->dma_arr = ctx_pg->ctx_dma_arr;
7099 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7100 if (rmem->depth >= 1)
7101 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7102 return bnxt_alloc_ring(bp, rmem);
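/* Allocate host pages for one backing-store region.  Small regions use
 * a single level of pages; regions larger than MAX_CTX_PAGES pages (or
 * when depth > 1 is requested) get an indirect page table, with each
 * table covering up to MAX_CTX_PAGES pages and described by its own
 * bnxt_ctx_pg_info.
 */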
7105 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7106 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7107 u8 depth, struct bnxt_mem_init *mem_init)
7109 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7115 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7116 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7117 ctx_pg->nr_pages = 0;
7120 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7124 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7126 if (!ctx_pg->ctx_pg_tbl)
7128 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7129 rmem->nr_pages = nr_tbls;
7130 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7133 for (i = 0; i < nr_tbls; i++) {
7134 struct bnxt_ctx_pg_info *pg_tbl;
7136 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7139 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7140 rmem = &pg_tbl->ring_mem;
7141 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7142 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7144 rmem->nr_pages = MAX_CTX_PAGES;
7145 rmem->mem_init = mem_init;
7146 if (i == (nr_tbls - 1)) {
7147 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7150 rmem->nr_pages = rem;
7152 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7157 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7158 if (rmem->nr_pages > 1 || depth)
7160 rmem->mem_init = mem_init;
7161 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7166 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7167 struct bnxt_ctx_pg_info *ctx_pg)
7169 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7171 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7172 ctx_pg->ctx_pg_tbl) {
7173 int i, nr_tbls = rmem->nr_pages;
7175 for (i = 0; i < nr_tbls; i++) {
7176 struct bnxt_ctx_pg_info *pg_tbl;
7177 struct bnxt_ring_mem_info *rmem2;
7179 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7182 rmem2 = &pg_tbl->ring_mem;
7183 bnxt_free_ring(bp, rmem2);
7184 ctx_pg->ctx_pg_arr[i] = NULL;
7186 ctx_pg->ctx_pg_tbl[i] = NULL;
7188 kfree(ctx_pg->ctx_pg_tbl);
7189 ctx_pg->ctx_pg_tbl = NULL;
7191 bnxt_free_ring(bp, rmem);
7192 ctx_pg->nr_pages = 0;
7195 static void bnxt_free_ctx_mem(struct bnxt *bp)
7197 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7203 if (ctx->tqm_mem[0]) {
7204 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7205 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7206 kfree(ctx->tqm_mem[0]);
7207 ctx->tqm_mem[0] = NULL;
7210 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7211 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7212 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7213 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7214 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7215 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7216 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7217 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
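/* Top-level context memory setup: query the firmware requirements,
 * size each region (QP, SRQ, CQ, VNIC, STAT, plus MRAV/TIM and larger
 * queue depths when RoCE is supported and this is not a kdump kernel),
 * allocate the backing pages, and finally program the firmware via
 * bnxt_hwrm_func_backing_store_cfg().
 */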
7220 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7222 struct bnxt_ctx_pg_info *ctx_pg;
7223 struct bnxt_ctx_mem_info *ctx;
7224 struct bnxt_mem_init *init;
7225 u32 mem_size, ena, entries;
7226 u32 entries_sp, min;
7233 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7235 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7240 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7243 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7249 ctx_pg = &ctx->qp_mem;
7250 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7252 if (ctx->qp_entry_size) {
7253 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7254 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7255 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7260 ctx_pg = &ctx->srq_mem;
7261 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7262 if (ctx->srq_entry_size) {
7263 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7264 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7265 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7270 ctx_pg = &ctx->cq_mem;
7271 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7272 if (ctx->cq_entry_size) {
7273 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7274 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7275 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7280 ctx_pg = &ctx->vnic_mem;
7281 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7282 ctx->vnic_max_ring_table_entries;
7283 if (ctx->vnic_entry_size) {
7284 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7285 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7286 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7291 ctx_pg = &ctx->stat_mem;
7292 ctx_pg->entries = ctx->stat_max_entries;
7293 if (ctx->stat_entry_size) {
7294 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7295 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7296 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7302 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7305 ctx_pg = &ctx->mrav_mem;
7306 /* 128K extra is needed to accommodate static AH context
7307 * allocation by f/w.
7308 */
7309 num_mr = 1024 * 256;
7310 num_ah = 1024 * 128;
7311 ctx_pg->entries = num_mr + num_ah;
7312 if (ctx->mrav_entry_size) {
7313 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7314 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7315 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7319 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7320 if (ctx->mrav_num_entries_units)
7322 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7323 (num_ah / ctx->mrav_num_entries_units);
7325 ctx_pg = &ctx->tim_mem;
7326 ctx_pg->entries = ctx->qp_mem.entries;
7327 if (ctx->tim_entry_size) {
7328 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7329 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7333 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7336 min = ctx->tqm_min_entries_per_ring;
7337 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7338 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7339 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7340 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7341 entries = roundup(entries, ctx->tqm_entries_multiple);
7342 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7343 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7344 ctx_pg = ctx->tqm_mem[i];
7345 ctx_pg->entries = i ? entries : entries_sp;
7346 if (ctx->tqm_entry_size) {
7347 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7348 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7353 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7355 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7356 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7358 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7362 ctx->flags |= BNXT_CTX_FLAG_INITED;
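/* Query the min/max resource limits for this function: RSS contexts,
 * completion/TX/RX rings, ring groups, L2 contexts, VNICs and stat
 * contexts.  On P5 chips the max MSI-X value also caps the number of
 * NQs and the ring-group limit simply tracks the RX ring limit.
 */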
7366 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7368 struct hwrm_func_resource_qcaps_output *resp;
7369 struct hwrm_func_resource_qcaps_input *req;
7370 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7373 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7377 req->fid = cpu_to_le16(0xffff);
7378 resp = hwrm_req_hold(bp, req);
7379 rc = hwrm_req_send_silent(bp, req);
7381 goto hwrm_func_resc_qcaps_exit;
7383 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7385 goto hwrm_func_resc_qcaps_exit;
7387 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7388 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7389 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7390 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7391 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7392 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7393 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7394 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7395 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7396 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7397 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7398 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7399 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7400 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7401 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7402 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7404 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7405 u16 max_msix = le16_to_cpu(resp->max_msix);
7407 hw_resc->max_nqs = max_msix;
7408 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7412 struct bnxt_pf_info *pf = &bp->pf;
7414 pf->vf_resv_strategy =
7415 le16_to_cpu(resp->vf_reservation_strategy);
7416 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7417 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7419 hwrm_func_resc_qcaps_exit:
7420 hwrm_req_drop(bp, req);
7424 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7426 struct hwrm_port_mac_ptp_qcfg_output *resp;
7427 struct hwrm_port_mac_ptp_qcfg_input *req;
7428 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7432 if (bp->hwrm_spec_code < 0x10801) {
7437 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7441 req->port_id = cpu_to_le16(bp->pf.port_id);
7442 resp = hwrm_req_hold(bp, req);
7443 rc = hwrm_req_send(bp, req);
7447 flags = resp->flags;
7448 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7453 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7461 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7462 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7463 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7464 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7465 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7466 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7471 rc = bnxt_ptp_init(bp);
7473 netdev_warn(bp->dev, "PTP initialization failed.\n");
7475 hwrm_req_drop(bp, req);
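/* Query function capabilities and translate them into bp->flags and
 * bp->fw_cap bits: RoCE v1/v2, PCIe and extended stats, hot reset,
 * error recovery, VLAN TX insertion, PTP PPS, TX push mode, WoL and so
 * on.  It also caches the max resource counts and, for a PF, the port
 * id, MAC address and flow/encap limits; for a VF, only fid and MAC.
 */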
7486 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7488 struct hwrm_func_qcaps_output *resp;
7489 struct hwrm_func_qcaps_input *req;
7490 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7491 u32 flags, flags_ext;
7494 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7498 req->fid = cpu_to_le16(0xffff);
7499 resp = hwrm_req_hold(bp, req);
7500 rc = hwrm_req_send(bp, req);
7502 goto hwrm_func_qcaps_exit;
7504 flags = le32_to_cpu(resp->flags);
7505 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7506 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7507 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7508 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7509 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7510 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7511 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7512 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7513 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7514 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7515 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7516 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7517 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7518 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7519 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7520 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7522 flags_ext = le32_to_cpu(resp->flags_ext);
7523 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7524 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7525 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7526 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7528 bp->tx_push_thresh = 0;
7529 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7530 BNXT_FW_MAJ(bp) > 217)
7531 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7533 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7534 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7535 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7536 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7537 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7538 if (!hw_resc->max_hw_ring_grps)
7539 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7540 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7541 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7542 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7545 struct bnxt_pf_info *pf = &bp->pf;
7547 pf->fw_fid = le16_to_cpu(resp->fid);
7548 pf->port_id = le16_to_cpu(resp->port_id);
7549 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7550 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7551 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7552 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7553 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7554 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7555 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7556 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7557 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7558 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7559 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7560 bp->flags |= BNXT_FLAG_WOL_CAP;
7561 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7562 __bnxt_hwrm_ptp_qcfg(bp);
7569 #ifdef CONFIG_BNXT_SRIOV
7570 struct bnxt_vf_info *vf = &bp->vf;
7572 vf->fw_fid = le16_to_cpu(resp->fid);
7573 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7577 hwrm_func_qcaps_exit:
7578 hwrm_req_drop(bp, req);
7582 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7584 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7588 rc = __bnxt_hwrm_func_qcaps(bp);
7591 rc = bnxt_hwrm_queue_qportcfg(bp);
7593 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7596 if (bp->hwrm_spec_code >= 0x10803) {
7597 rc = bnxt_alloc_ctx_mem(bp);
7600 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7602 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7607 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7609 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7610 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7614 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7617 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7621 resp = hwrm_req_hold(bp, req);
7622 rc = hwrm_req_send(bp, req);
7624 goto hwrm_cfa_adv_qcaps_exit;
7626 flags = le32_to_cpu(resp->flags);
7628 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7629 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7631 hwrm_cfa_adv_qcaps_exit:
7632 hwrm_req_drop(bp, req);
7636 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7641 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7648 static int bnxt_alloc_fw_health(struct bnxt *bp)
7652 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7653 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7656 rc = __bnxt_alloc_fw_health(bp);
7658 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7659 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7666 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7668 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7669 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7670 BNXT_FW_HEALTH_WIN_MAP_OFF);
7673 bool bnxt_is_fw_healthy(struct bnxt *bp)
7675 if (bp->fw_health && bp->fw_health->status_reliable) {
7678 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7679 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7686 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7688 struct bnxt_fw_health *fw_health = bp->fw_health;
7691 if (!fw_health || !fw_health->status_reliable)
7694 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7695 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7696 fw_health->status_reliable = false;
7699 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7707 bp->fw_health->status_reliable = false;
7709 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7710 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7712 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7713 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7714 if (!bp->chip_num) {
7715 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7716 bp->chip_num = readl(bp->bar0 +
7717 BNXT_FW_HEALTH_WIN_BASE +
7718 BNXT_GRC_REG_CHIP_NUM);
7720 if (!BNXT_CHIP_P5(bp))
7723 status_loc = BNXT_GRC_REG_STATUS_P5 |
7724 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7726 status_loc = readl(hs + offsetof(struct hcomm_status,
7730 if (__bnxt_alloc_fw_health(bp)) {
7731 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7735 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7736 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7737 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7738 __bnxt_map_fw_health_reg(bp, status_loc);
7739 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7740 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7743 bp->fw_health->status_reliable = true;
7746 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7748 struct bnxt_fw_health *fw_health = bp->fw_health;
7749 u32 reg_base = 0xffffffff;
7752 bp->fw_health->status_reliable = false;
7753 /* Only pre-map the monitoring GRC registers using window 3 */
7754 for (i = 0; i < 4; i++) {
7755 u32 reg = fw_health->regs[i];
7757 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7759 if (reg_base == 0xffffffff)
7760 reg_base = reg & BNXT_GRC_BASE_MASK;
7761 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7763 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7765 bp->fw_health->status_reliable = true;
7766 if (reg_base == 0xffffffff)
7769 __bnxt_map_fw_health_reg(bp, reg_base);
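/* Fetch the firmware error-recovery parameters: polling frequency, the
 * master/normal function wait times, the health, heartbeat, reset-count
 * and reset-in-progress register locations, and the reset register
 * write sequence.  Any failure here clears BNXT_FW_CAP_ERROR_RECOVERY.
 */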
7773 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7775 struct bnxt_fw_health *fw_health = bp->fw_health;
7776 struct hwrm_error_recovery_qcfg_output *resp;
7777 struct hwrm_error_recovery_qcfg_input *req;
7780 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7783 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7787 resp = hwrm_req_hold(bp, req);
7788 rc = hwrm_req_send(bp, req);
7790 goto err_recovery_out;
7791 fw_health->flags = le32_to_cpu(resp->flags);
7792 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7793 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7795 goto err_recovery_out;
7797 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7798 fw_health->master_func_wait_dsecs =
7799 le32_to_cpu(resp->master_func_wait_period);
7800 fw_health->normal_func_wait_dsecs =
7801 le32_to_cpu(resp->normal_func_wait_period);
7802 fw_health->post_reset_wait_dsecs =
7803 le32_to_cpu(resp->master_func_wait_period_after_reset);
7804 fw_health->post_reset_max_wait_dsecs =
7805 le32_to_cpu(resp->max_bailout_time_after_reset);
7806 fw_health->regs[BNXT_FW_HEALTH_REG] =
7807 le32_to_cpu(resp->fw_health_status_reg);
7808 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7809 le32_to_cpu(resp->fw_heartbeat_reg);
7810 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7811 le32_to_cpu(resp->fw_reset_cnt_reg);
7812 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7813 le32_to_cpu(resp->reset_inprogress_reg);
7814 fw_health->fw_reset_inprog_reg_mask =
7815 le32_to_cpu(resp->reset_inprogress_reg_mask);
7816 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7817 if (fw_health->fw_reset_seq_cnt >= 16) {
7819 goto err_recovery_out;
7821 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7822 fw_health->fw_reset_seq_regs[i] =
7823 le32_to_cpu(resp->reset_reg[i]);
7824 fw_health->fw_reset_seq_vals[i] =
7825 le32_to_cpu(resp->reset_reg_val[i]);
7826 fw_health->fw_reset_seq_delay_msec[i] =
7827 resp->delay_after_reset[i];
7830 hwrm_req_drop(bp, req);
7832 rc = bnxt_map_fw_health_regs(bp);
7834 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7838 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7840 struct hwrm_func_reset_input *req;
7843 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7848 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7849 return hwrm_req_send(bp, req);
7852 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7854 struct hwrm_nvm_get_dev_info_output nvm_info;
7856 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7857 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7858 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7859 nvm_info.nvm_cfg_ver_upd);
7862 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7864 struct hwrm_queue_qportcfg_output *resp;
7865 struct hwrm_queue_qportcfg_input *req;
7870 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7874 resp = hwrm_req_hold(bp, req);
7875 rc = hwrm_req_send(bp, req);
7879 if (!resp->max_configurable_queues) {
7883 bp->max_tc = resp->max_configurable_queues;
7884 bp->max_lltc = resp->max_configurable_lossless_queues;
7885 if (bp->max_tc > BNXT_MAX_QUEUE)
7886 bp->max_tc = BNXT_MAX_QUEUE;
7888 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7889 qptr = &resp->queue_id0;
7890 for (i = 0, j = 0; i < bp->max_tc; i++) {
7891 bp->q_info[j].queue_id = *qptr;
7892 bp->q_ids[i] = *qptr++;
7893 bp->q_info[j].queue_profile = *qptr++;
7894 bp->tc_to_qidx[j] = j;
7895 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7896 (no_rdma && BNXT_PF(bp)))
7899 bp->max_q = bp->max_tc;
7900 bp->max_tc = max_t(u8, j, 1);
7902 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7905 if (bp->max_lltc > bp->max_tc)
7906 bp->max_lltc = bp->max_tc;
7909 hwrm_req_drop(bp, req);
7913 static int bnxt_hwrm_poll(struct bnxt *bp)
7915 struct hwrm_ver_get_input *req;
7918 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7922 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7923 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7924 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7926 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7927 rc = hwrm_req_send(bp, req);
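/* Full HWRM_VER_GET handshake.  Unlike bnxt_hwrm_poll() above, the
 * response is cached: HWRM spec version, firmware and package version
 * strings, default command timeout, max (extended) request length,
 * chip number/revision, and the dev_caps_cfg bits such as short
 * command support, the Kong mailbox channel and 64-bit flow handles.
 */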
7931 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7933 struct hwrm_ver_get_output *resp;
7934 struct hwrm_ver_get_input *req;
7935 u16 fw_maj, fw_min, fw_bld, fw_rsv;
7936 u32 dev_caps_cfg, hwrm_ver;
7939 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7943 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
7944 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7945 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7946 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7947 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7949 resp = hwrm_req_hold(bp, req);
7950 rc = hwrm_req_send(bp, req);
7952 goto hwrm_ver_get_exit;
7954 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7956 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7957 resp->hwrm_intf_min_8b << 8 |
7958 resp->hwrm_intf_upd_8b;
7959 if (resp->hwrm_intf_maj_8b < 1) {
7960 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7961 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7962 resp->hwrm_intf_upd_8b);
7963 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7966 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7967 HWRM_VERSION_UPDATE;
7969 if (bp->hwrm_spec_code > hwrm_ver)
7970 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7971 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7972 HWRM_VERSION_UPDATE);
7974 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7975 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7976 resp->hwrm_intf_upd_8b);
7978 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7979 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7980 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7981 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7982 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7983 len = FW_VER_STR_LEN;
7985 fw_maj = resp->hwrm_fw_maj_8b;
7986 fw_min = resp->hwrm_fw_min_8b;
7987 fw_bld = resp->hwrm_fw_bld_8b;
7988 fw_rsv = resp->hwrm_fw_rsvd_8b;
7989 len = BC_HWRM_STR_LEN;
7991 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7992 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7995 if (strlen(resp->active_pkg_name)) {
7996 int fw_ver_len = strlen(bp->fw_ver_str);
7998 snprintf(bp->fw_ver_str + fw_ver_len,
7999 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8000 resp->active_pkg_name);
8001 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8004 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8005 if (!bp->hwrm_cmd_timeout)
8006 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8008 if (resp->hwrm_intf_maj_8b >= 1) {
8009 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8010 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8012 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8013 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8015 bp->chip_num = le16_to_cpu(resp->chip_num);
8016 bp->chip_rev = resp->chip_rev;
8017 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8019 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8021 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8022 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8023 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8024 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8026 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8027 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8030 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8031 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8034 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8035 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8038 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8039 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8042 hwrm_req_drop(bp, req);
8046 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8048 struct hwrm_fw_set_time_input *req;
8050 time64_t now = ktime_get_real_seconds();
8053 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8054 bp->hwrm_spec_code < 0x10400)
8057 time64_to_tm(now, 0, &tm);
8058 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8062 req->year = cpu_to_le16(1900 + tm.tm_year);
8063 req->month = 1 + tm.tm_mon;
8064 req->day = tm.tm_mday;
8065 req->hour = tm.tm_hour;
8066 req->minute = tm.tm_min;
8067 req->second = tm.tm_sec;
8068 return hwrm_req_send(bp, req);
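/* Fold one hardware counter into its 64-bit software shadow.  The
 * hardware counter is only as wide as @mask; the shadow keeps the
 * overflow bits above @mask.  When the new hardware value is smaller
 * than the previously seen (masked) value, the counter has wrapped and
 * mask + 1 is added to compensate.  For example, with a 48-bit mask,
 * hw going from 0xfffffffffff0 to 0x10 advances the shadow by 0x20
 * instead of going backwards.
 */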
8071 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8076 sw_tmp = (*sw & ~mask) | hw;
8077 if (hw < (*sw & mask))
8079 WRITE_ONCE(*sw, sw_tmp);
8082 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8083 int count, bool ignore_zero)
8087 for (i = 0; i < count; i++) {
8088 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8090 if (ignore_zero && !hw)
8093 if (masks[i] == -1ULL)
8096 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8100 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8102 if (!stats->hw_stats)
8105 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8106 stats->hw_masks, stats->len / 8, false);
8109 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8111 struct bnxt_stats_mem *ring0_stats;
8112 bool ignore_zero = false;
8115 /* Chip bug. Counter intermittently becomes 0. */
8116 if (bp->flags & BNXT_FLAG_CHIP_P5)
8119 for (i = 0; i < bp->cp_nr_rings; i++) {
8120 struct bnxt_napi *bnapi = bp->bnapi[i];
8121 struct bnxt_cp_ring_info *cpr;
8122 struct bnxt_stats_mem *stats;
8124 cpr = &bnapi->cp_ring;
8125 stats = &cpr->stats;
8127 ring0_stats = stats;
8128 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8129 ring0_stats->hw_masks,
8130 ring0_stats->len / 8, ignore_zero);
8132 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8133 struct bnxt_stats_mem *stats = &bp->port_stats;
8134 __le64 *hw_stats = stats->hw_stats;
8135 u64 *sw_stats = stats->sw_stats;
8136 u64 *masks = stats->hw_masks;
8139 cnt = sizeof(struct rx_port_stats) / 8;
8140 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8142 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8143 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8144 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8145 cnt = sizeof(struct tx_port_stats) / 8;
8146 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8148 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8149 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8150 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8154 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8156 struct hwrm_port_qstats_input *req;
8157 struct bnxt_pf_info *pf = &bp->pf;
8160 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8163 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8166 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8171 req->port_id = cpu_to_le16(pf->port_id);
8172 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8173 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8174 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8175 return hwrm_req_send(bp, req);
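/* Collect extended port statistics and, when the firmware returns
 * enough TX stats to cover the per-priority PFC duration counters,
 * also query the priority-to-CoS-queue mapping so per-priority stats
 * can be reported.  Per-port CoS queue IDs appear to be assigned in
 * groups of ten (0, 10, 20, ...), hence the queue_id % 10 below.
 */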
8178 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8180 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8181 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8182 struct hwrm_port_qstats_ext_output *resp_qs;
8183 struct hwrm_port_qstats_ext_input *req_qs;
8184 struct bnxt_pf_info *pf = &bp->pf;
8188 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8191 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8194 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8198 req_qs->flags = flags;
8199 req_qs->port_id = cpu_to_le16(pf->port_id);
8200 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8201 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8202 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8203 sizeof(struct tx_port_stats_ext) : 0;
8204 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8205 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8206 resp_qs = hwrm_req_hold(bp, req_qs);
8207 rc = hwrm_req_send(bp, req_qs);
8209 bp->fw_rx_stats_ext_size =
8210 le16_to_cpu(resp_qs->rx_stat_size) / 8;
8211 bp->fw_tx_stats_ext_size = tx_stat_size ?
8212 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8214 bp->fw_rx_stats_ext_size = 0;
8215 bp->fw_tx_stats_ext_size = 0;
8217 hwrm_req_drop(bp, req_qs);
8222 if (bp->fw_tx_stats_ext_size <=
8223 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8224 bp->pri2cos_valid = 0;
8228 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8232 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8234 resp_qc = hwrm_req_hold(bp, req_qc);
8235 rc = hwrm_req_send(bp, req_qc);
8240 pri2cos = &resp_qc->pri0_cos_queue_id;
8241 for (i = 0; i < 8; i++) {
8242 u8 queue_id = pri2cos[i];
8245 /* Per port queue IDs start from 0, 10, 20, etc */
8246 queue_idx = queue_id % 10;
8247 if (queue_idx > BNXT_MAX_QUEUE) {
8248 bp->pri2cos_valid = false;
8249 hwrm_req_drop(bp, req_qc);
8252 for (j = 0; j < bp->max_q; j++) {
8253 if (bp->q_ids[j] == queue_id)
8254 bp->pri2cos_idx[i] = queue_idx;
8257 bp->pri2cos_valid = true;
8259 hwrm_req_drop(bp, req_qc);
8264 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8266 bnxt_hwrm_tunnel_dst_port_free(bp,
8267 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8268 bnxt_hwrm_tunnel_dst_port_free(bp,
8269 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8272 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8278 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8279 else if (BNXT_NO_FW_ACCESS(bp))
8281 for (i = 0; i < bp->nr_vnics; i++) {
8282 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8284 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8292 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8296 for (i = 0; i < bp->nr_vnics; i++)
8297 bnxt_hwrm_vnic_set_rss(bp, i, false);
8300 static void bnxt_clear_vnic(struct bnxt *bp)
8305 bnxt_hwrm_clear_vnic_filter(bp);
8306 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8307 /* clear all RSS settings before freeing the VNIC contexts */
8308 bnxt_hwrm_clear_vnic_rss(bp);
8309 bnxt_hwrm_vnic_ctx_free(bp);
8311 /* before freeing the VNIC, undo the VNIC TPA settings */
8312 if (bp->flags & BNXT_FLAG_TPA)
8313 bnxt_set_tpa(bp, false);
8314 bnxt_hwrm_vnic_free(bp);
8315 if (bp->flags & BNXT_FLAG_CHIP_P5)
8316 bnxt_hwrm_vnic_ctx_free(bp);
8319 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8322 bnxt_clear_vnic(bp);
8323 bnxt_hwrm_ring_free(bp, close_path);
8324 bnxt_hwrm_ring_grp_free(bp);
8326 bnxt_hwrm_stat_ctx_free(bp);
8327 bnxt_hwrm_free_tunnel_ports(bp);
8331 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8333 struct hwrm_func_cfg_input *req;
8337 if (br_mode == BRIDGE_MODE_VEB)
8338 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8339 else if (br_mode == BRIDGE_MODE_VEPA)
8340 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8344 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8348 req->fid = cpu_to_le16(0xffff);
8349 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8350 req->evb_mode = evb_mode;
8351 return hwrm_req_send(bp, req);
8354 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8356 struct hwrm_func_cfg_input *req;
8359 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8362 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8366 req->fid = cpu_to_le16(0xffff);
8367 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8368 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8370 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8372 return hwrm_req_send(bp, req);
8375 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8377 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8380 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8383 /* allocate context for vnic */
8384 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8386 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8388 goto vnic_setup_err;
8390 bp->rsscos_nr_ctxs++;
8392 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8393 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8395 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8397 goto vnic_setup_err;
8399 bp->rsscos_nr_ctxs++;
8403 /* configure default vnic, ring grp */
8404 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8406 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8408 goto vnic_setup_err;
8411 /* Enable RSS hashing on vnic */
8412 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8414 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8416 goto vnic_setup_err;
8419 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8420 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8422 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8431 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8435 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8436 for (i = 0; i < nr_ctxs; i++) {
8437 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8439 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8443 bp->rsscos_nr_ctxs++;
8448 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8450 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8454 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8456 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8460 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8461 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8463 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8470 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8472 if (bp->flags & BNXT_FLAG_CHIP_P5)
8473 return __bnxt_setup_vnic_p5(bp, vnic_id);
8475 return __bnxt_setup_vnic(bp, vnic_id);
8478 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8480 #ifdef CONFIG_RFS_ACCEL
8483 if (bp->flags & BNXT_FLAG_CHIP_P5)
8486 for (i = 0; i < bp->rx_nr_rings; i++) {
8487 struct bnxt_vnic_info *vnic;
8488 u16 vnic_id = i + 1;
8491 if (vnic_id >= bp->nr_vnics)
8494 vnic = &bp->vnic_info[vnic_id];
8495 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8496 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8497 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8498 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8500 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8504 rc = bnxt_setup_vnic(bp, vnic_id);
8514 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8515 static bool bnxt_promisc_ok(struct bnxt *bp)
8517 #ifdef CONFIG_BNXT_SRIOV
8518 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8524 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8526 unsigned int rc = 0;
8528 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8530 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8535 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8537 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8544 static int bnxt_cfg_rx_mode(struct bnxt *);
8545 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8547 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8549 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8551 unsigned int rx_nr_rings = bp->rx_nr_rings;
8554 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8556 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8562 rc = bnxt_hwrm_ring_alloc(bp);
8564 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8568 rc = bnxt_hwrm_ring_grp_alloc(bp);
8570 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8574 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8577 /* default vnic 0 */
8578 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8580 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8584 rc = bnxt_setup_vnic(bp, 0);
8588 if (bp->flags & BNXT_FLAG_RFS) {
8589 rc = bnxt_alloc_rfs_vnics(bp);
8594 if (bp->flags & BNXT_FLAG_TPA) {
8595 rc = bnxt_set_tpa(bp, true);
8601 bnxt_update_vf_mac(bp);
8603 /* Filter for default vnic 0 */
8604 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8606 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8609 vnic->uc_filter_count = 1;
8612 if (bp->dev->flags & IFF_BROADCAST)
8613 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8615 if (bp->dev->flags & IFF_PROMISC)
8616 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8618 if (bp->dev->flags & IFF_ALLMULTI) {
8619 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8620 vnic->mc_list_count = 0;
8624 bnxt_mc_list_updated(bp, &mask);
8625 vnic->rx_mask |= mask;
8628 rc = bnxt_cfg_rx_mode(bp);
8632 rc = bnxt_hwrm_set_coal(bp);
8634 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8637 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8638 rc = bnxt_setup_nitroa0_vnic(bp);
8640 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8645 bnxt_hwrm_func_qcfg(bp);
8646 netdev_update_features(bp->dev);
8652 bnxt_hwrm_resource_free(bp, 0, true);
8657 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8659 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8663 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8665 bnxt_init_cp_rings(bp);
8666 bnxt_init_rx_rings(bp);
8667 bnxt_init_tx_rings(bp);
8668 bnxt_init_ring_grps(bp, irq_re_init);
8669 bnxt_init_vnics(bp);
8671 return bnxt_init_chip(bp, irq_re_init);
8674 static int bnxt_set_real_num_queues(struct bnxt *bp)
8677 struct net_device *dev = bp->dev;
8679 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8680 bp->tx_nr_rings_xdp);
8684 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8688 #ifdef CONFIG_RFS_ACCEL
8689 if (bp->flags & BNXT_FLAG_RFS)
8690 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
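/* Trim the requested RX and TX ring counts so that they fit within
 * @max completion rings.  With shared completion rings each count is
 * simply clamped to @max; otherwise rings are removed from whichever
 * side is larger until rx + tx no longer exceeds @max.
 */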
8696 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8699 int _rx = *rx, _tx = *tx;
8702 *rx = min_t(int, _rx, max);
8703 *tx = min_t(int, _tx, max);
8708 while (_rx + _tx > max) {
8709 if (_rx > _tx && _rx > 1)
8720 static void bnxt_setup_msix(struct bnxt *bp)
8722 const int len = sizeof(bp->irq_tbl[0].name);
8723 struct net_device *dev = bp->dev;
8726 tcs = netdev_get_num_tc(dev);
8730 for (i = 0; i < tcs; i++) {
8731 count = bp->tx_nr_rings_per_tc;
8733 netdev_set_tc_queue(dev, i, count, off);
8737 for (i = 0; i < bp->cp_nr_rings; i++) {
8738 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8741 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8743 else if (i < bp->rx_nr_rings)
8748 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8750 bp->irq_tbl[map_idx].handler = bnxt_msix;
8754 static void bnxt_setup_inta(struct bnxt *bp)
8756 const int len = sizeof(bp->irq_tbl[0].name);
8758 if (netdev_get_num_tc(bp->dev))
8759 netdev_reset_tc(bp->dev);
8761 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8763 bp->irq_tbl[0].handler = bnxt_inta;
8766 static int bnxt_init_int_mode(struct bnxt *bp);
8768 static int bnxt_setup_int_mode(struct bnxt *bp)
8773 rc = bnxt_init_int_mode(bp);
8774 if (rc || !bp->irq_tbl)
8775 return rc ?: -ENODEV;
8778 if (bp->flags & BNXT_FLAG_USING_MSIX)
8779 bnxt_setup_msix(bp);
8781 bnxt_setup_inta(bp);
8783 rc = bnxt_set_real_num_queues(bp);
8787 #ifdef CONFIG_RFS_ACCEL
8788 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8790 return bp->hw_resc.max_rsscos_ctxs;
8793 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8795 return bp->hw_resc.max_vnics;
8799 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8801 return bp->hw_resc.max_stat_ctxs;
8804 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8806 return bp->hw_resc.max_cp_rings;
8809 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8811 unsigned int cp = bp->hw_resc.max_cp_rings;
8813 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8814 cp -= bnxt_get_ulp_msix_num(bp);
8819 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8821 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8823 if (bp->flags & BNXT_FLAG_CHIP_P5)
8824 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8826 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8829 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8831 bp->hw_resc.max_irqs = max_irqs;
8834 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8838 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8839 if (bp->flags & BNXT_FLAG_CHIP_P5)
8840 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8842 return cp - bp->cp_nr_rings;
8845 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8847 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8850 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8852 int max_cp = bnxt_get_max_func_cp_rings(bp);
8853 int max_irq = bnxt_get_max_func_irqs(bp);
8854 int total_req = bp->cp_nr_rings + num;
8855 int max_idx, avail_msix;
8857 max_idx = bp->total_irqs;
8858 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8859 max_idx = min_t(int, bp->total_irqs, max_cp);
8860 avail_msix = max_idx - bp->cp_nr_rings;
8861 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8864 if (max_irq < total_req) {
8865 num = max_irq - bp->cp_nr_rings;
8872 static int bnxt_get_num_msix(struct bnxt *bp)
8874 if (!BNXT_NEW_RM(bp))
8875 return bnxt_get_max_func_irqs(bp);
8877 return bnxt_nq_rings_in_use(bp);
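/* Enable MSI-X: request one vector per NQ/completion ring (plus any
 * vectors reserved for the RDMA ULP), accept a smaller range from
 * pci_enable_msix_range() when possible, then trim the RX/TX ring
 * counts and set cp_nr_rings to match what was actually granted.
 */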
8880 static int bnxt_init_msix(struct bnxt *bp)
8882 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8883 struct msix_entry *msix_ent;
8885 total_vecs = bnxt_get_num_msix(bp);
8886 max = bnxt_get_max_func_irqs(bp);
8887 if (total_vecs > max)
8893 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8897 for (i = 0; i < total_vecs; i++) {
8898 msix_ent[i].entry = i;
8899 msix_ent[i].vector = 0;
8902 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8905 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8906 ulp_msix = bnxt_get_ulp_msix_num(bp);
8907 if (total_vecs < 0 || total_vecs < ulp_msix) {
8909 goto msix_setup_exit;
8912 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8914 for (i = 0; i < total_vecs; i++)
8915 bp->irq_tbl[i].vector = msix_ent[i].vector;
8917 bp->total_irqs = total_vecs;
8918 /* Trim the ring counts based on the number of vectors actually allocated */
8919 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8920 total_vecs - ulp_msix, min == 1);
8922 goto msix_setup_exit;
8924 bp->cp_nr_rings = (min == 1) ?
8925 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8926 bp->tx_nr_rings + bp->rx_nr_rings;
8930 goto msix_setup_exit;
8932 bp->flags |= BNXT_FLAG_USING_MSIX;
8937 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8940 pci_disable_msix(bp->pdev);
8945 static int bnxt_init_inta(struct bnxt *bp)
8947 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8952 bp->rx_nr_rings = 1;
8953 bp->tx_nr_rings = 1;
8954 bp->cp_nr_rings = 1;
8955 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8956 bp->irq_tbl[0].vector = bp->pdev->irq;
8960 static int bnxt_init_int_mode(struct bnxt *bp)
8964 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8965 rc = bnxt_init_msix(bp);
8967 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8968 /* fallback to INTA */
8969 rc = bnxt_init_inta(bp);
8974 static void bnxt_clear_int_mode(struct bnxt *bp)
8976 if (bp->flags & BNXT_FLAG_USING_MSIX)
8977 pci_disable_msix(bp->pdev);
8981 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8984 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8986 int tcs = netdev_get_num_tc(bp->dev);
8987 bool irq_cleared = false;
8990 if (!bnxt_need_reserve_rings(bp))
8993 if (irq_re_init && BNXT_NEW_RM(bp) &&
8994 bnxt_get_num_msix(bp) != bp->total_irqs) {
8995 bnxt_ulp_irq_stop(bp);
8996 bnxt_clear_int_mode(bp);
8999 rc = __bnxt_reserve_rings(bp);
9002 rc = bnxt_init_int_mode(bp);
9003 bnxt_ulp_irq_restart(bp, rc);
9006 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9009 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9010 netdev_err(bp->dev, "tx ring reservation failure\n");
9011 netdev_reset_tc(bp->dev);
9012 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9018 static void bnxt_free_irq(struct bnxt *bp)
9020 struct bnxt_irq *irq;
9023 #ifdef CONFIG_RFS_ACCEL
9024 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9025 bp->dev->rx_cpu_rmap = NULL;
9027 if (!bp->irq_tbl || !bp->bnapi)
9030 for (i = 0; i < bp->cp_nr_rings; i++) {
9031 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9033 irq = &bp->irq_tbl[map_idx];
9034 if (irq->requested) {
9035 if (irq->have_cpumask) {
9036 irq_set_affinity_hint(irq->vector, NULL);
9037 free_cpumask_var(irq->cpu_mask);
9038 irq->have_cpumask = 0;
9040 free_irq(irq->vector, bp->bnapi[i]);
9047 static int bnxt_request_irq(struct bnxt *bp)
9050 unsigned long flags = 0;
9051 #ifdef CONFIG_RFS_ACCEL
9052 struct cpu_rmap *rmap;
9055 rc = bnxt_setup_int_mode(bp);
9057 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9061 #ifdef CONFIG_RFS_ACCEL
9062 rmap = bp->dev->rx_cpu_rmap;
9064 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9065 flags = IRQF_SHARED;
9067 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9068 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9069 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9071 #ifdef CONFIG_RFS_ACCEL
9072 if (rmap && bp->bnapi[i]->rx_ring) {
9073 rc = irq_cpu_rmap_add(rmap, irq->vector);
9075 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9080 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9087 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9088 int numa_node = dev_to_node(&bp->pdev->dev);
9090 irq->have_cpumask = 1;
9091 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9093 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9095 netdev_warn(bp->dev,
9096 "Set affinity failed, IRQ = %d\n",
9105 static void bnxt_del_napi(struct bnxt *bp)
9112 for (i = 0; i < bp->cp_nr_rings; i++) {
9113 struct bnxt_napi *bnapi = bp->bnapi[i];
9115 __netif_napi_del(&bnapi->napi);
9117 /* We called __netif_napi_del(), we need
9118 * to respect an RCU grace period before freeing napi structures.
9119 */
9123 static void bnxt_init_napi(struct bnxt *bp)
9126 unsigned int cp_nr_rings = bp->cp_nr_rings;
9127 struct bnxt_napi *bnapi;
9129 if (bp->flags & BNXT_FLAG_USING_MSIX) {
9130 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9132 if (bp->flags & BNXT_FLAG_CHIP_P5)
9133 poll_fn = bnxt_poll_p5;
9134 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9136 for (i = 0; i < cp_nr_rings; i++) {
9137 bnapi = bp->bnapi[i];
9138 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9140 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9141 bnapi = bp->bnapi[cp_nr_rings];
9142 netif_napi_add(bp->dev, &bnapi->napi,
9143 bnxt_poll_nitroa0, 64);
9146 bnapi = bp->bnapi[0];
9147 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9151 static void bnxt_disable_napi(struct bnxt *bp)
9156 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9159 for (i = 0; i < bp->cp_nr_rings; i++) {
9160 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9162 napi_disable(&bp->bnapi[i]->napi);
9163 if (bp->bnapi[i]->rx_ring)
9164 cancel_work_sync(&cpr->dim.work);
9168 static void bnxt_enable_napi(struct bnxt *bp)
9172 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9173 for (i = 0; i < bp->cp_nr_rings; i++) {
9174 struct bnxt_napi *bnapi = bp->bnapi[i];
9175 struct bnxt_cp_ring_info *cpr;
9177 cpr = &bnapi->cp_ring;
9178 if (bnapi->in_reset)
9179 cpr->sw_stats.rx.rx_resets++;
9180 bnapi->in_reset = false;
9182 if (bnapi->rx_ring) {
9183 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9184 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9186 napi_enable(&bnapi->napi);
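/* Quiesce the TX path: mark every TX ring as closing so the xmit path
 * stops queuing packets, then drop the carrier and disable all TX
 * queues at the stack level.
 */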
9190 void bnxt_tx_disable(struct bnxt *bp)
9193 struct bnxt_tx_ring_info *txr;
9196 for (i = 0; i < bp->tx_nr_rings; i++) {
9197 txr = &bp->tx_ring[i];
9198 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9201 /* Make sure napi polls see @dev_state change */
9203 /* Drop carrier first to prevent TX timeout */
9204 netif_carrier_off(bp->dev);
9205 /* Stop all TX queues */
9206 netif_tx_disable(bp->dev);
9209 void bnxt_tx_enable(struct bnxt *bp)
9212 struct bnxt_tx_ring_info *txr;
9214 for (i = 0; i < bp->tx_nr_rings; i++) {
9215 txr = &bp->tx_ring[i];
9216 WRITE_ONCE(txr->dev_state, 0);
9218 /* Make sure napi polls see @dev_state change */
9220 netif_tx_wake_all_queues(bp->dev);
9221 if (bp->link_info.link_up)
9222 netif_carrier_on(bp->dev);
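/* Translate the active FEC mode reported by firmware into a
 * human-readable string for the link-up message.
 */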
9225 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9227 u8 active_fec = link_info->active_fec_sig_mode &
9228 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9230 switch (active_fec) {
9232 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9234 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9235 return "Clause 74 BaseR";
9236 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9237 return "Clause 91 RS(528,514)";
9238 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9239 return "Clause 91 RS544_1XN";
9240 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9241 return "Clause 91 RS(544,514)";
9242 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9243 return "Clause 91 RS272_1XN";
9244 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9245 return "Clause 91 RS(272,257)";
9249 static void bnxt_report_link(struct bnxt *bp)
9251 if (bp->link_info.link_up) {
9252 const char *signal = "";
9253 const char *flow_ctrl;
9258 netif_carrier_on(bp->dev);
9259 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9260 if (speed == SPEED_UNKNOWN) {
9261 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9264 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9268 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9269 flow_ctrl = "ON - receive & transmit";
9270 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9271 flow_ctrl = "ON - transmit";
9272 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9273 flow_ctrl = "ON - receive";
9276 if (bp->link_info.phy_qcfg_resp.option_flags &
9277 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9278 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9279 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9281 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9284 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9291 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9292 speed, signal, duplex, flow_ctrl);
9293 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9294 netdev_info(bp->dev, "EEE is %s\n",
9295 bp->eee.eee_active ? "active" :
9297 fec = bp->link_info.fec_cfg;
9298 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9299 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9300 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9301 bnxt_report_fec(&bp->link_info));
9303 netif_carrier_off(bp->dev);
9304 netdev_err(bp->dev, "NIC Link is Down\n");
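/* Returns true if the PHY capabilities response reports no supported
 * speeds in any of the NRZ/PAM4 auto or forced modes, which the driver
 * treats as the Ethernet link being disabled.
 */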
9308 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9310 if (!resp->supported_speeds_auto_mode &&
9311 !resp->supported_speeds_force_mode &&
9312 !resp->supported_pam4_speeds_auto_mode &&
9313 !resp->supported_pam4_speeds_force_mode)
9318 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9320 struct bnxt_link_info *link_info = &bp->link_info;
9321 struct hwrm_port_phy_qcaps_output *resp;
9322 struct hwrm_port_phy_qcaps_input *req;
9325 if (bp->hwrm_spec_code < 0x10201)
9328 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9332 resp = hwrm_req_hold(bp, req);
9333 rc = hwrm_req_send(bp, req);
9335 goto hwrm_phy_qcaps_exit;
9337 bp->phy_flags = resp->flags;
9338 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9339 struct ethtool_eee *eee = &bp->eee;
9340 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9342 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9343 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9344 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9345 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9346 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9349 if (bp->hwrm_spec_code >= 0x10a01) {
9350 if (bnxt_phy_qcaps_no_speed(resp)) {
9351 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9352 netdev_warn(bp->dev, "Ethernet link disabled\n");
9353 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9354 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9355 netdev_info(bp->dev, "Ethernet link enabled\n");
9356 /* Phy re-enabled, reprobe the speeds */
9357 link_info->support_auto_speeds = 0;
9358 link_info->support_pam4_auto_speeds = 0;
9361 if (resp->supported_speeds_auto_mode)
9362 link_info->support_auto_speeds =
9363 le16_to_cpu(resp->supported_speeds_auto_mode);
9364 if (resp->supported_pam4_speeds_auto_mode)
9365 link_info->support_pam4_auto_speeds =
9366 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9368 bp->port_count = resp->port_cnt;
9370 hwrm_phy_qcaps_exit:
9371 hwrm_req_drop(bp, req);
9375 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9377 u16 diff = advertising ^ supported;
9379 return ((supported | diff) != supported);
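/* Query HWRM_PORT_PHY_QCFG and refresh the cached link state (speed,
 * duplex, pause, FEC, EEE, module status).  When chng_link_state is
 * set, update link_up and report any change; also drop advertised
 * speeds no longer supported by firmware and re-apply the link
 * settings if speed autoneg is enabled.
 */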
9382 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9384 struct bnxt_link_info *link_info = &bp->link_info;
9385 struct hwrm_port_phy_qcfg_output *resp;
9386 struct hwrm_port_phy_qcfg_input *req;
9387 u8 link_up = link_info->link_up;
9388 bool support_changed = false;
9391 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9395 resp = hwrm_req_hold(bp, req);
9396 rc = hwrm_req_send(bp, req);
9398 hwrm_req_drop(bp, req);
9402 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9403 link_info->phy_link_status = resp->link;
9404 link_info->duplex = resp->duplex_cfg;
9405 if (bp->hwrm_spec_code >= 0x10800)
9406 link_info->duplex = resp->duplex_state;
9407 link_info->pause = resp->pause;
9408 link_info->auto_mode = resp->auto_mode;
9409 link_info->auto_pause_setting = resp->auto_pause;
9410 link_info->lp_pause = resp->link_partner_adv_pause;
9411 link_info->force_pause_setting = resp->force_pause;
9412 link_info->duplex_setting = resp->duplex_cfg;
9413 if (link_info->phy_link_status == BNXT_LINK_LINK)
9414 link_info->link_speed = le16_to_cpu(resp->link_speed);
9416 link_info->link_speed = 0;
9417 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9418 link_info->force_pam4_link_speed =
9419 le16_to_cpu(resp->force_pam4_link_speed);
9420 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9421 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9422 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9423 link_info->auto_pam4_link_speeds =
9424 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9425 link_info->lp_auto_link_speeds =
9426 le16_to_cpu(resp->link_partner_adv_speeds);
9427 link_info->lp_auto_pam4_link_speeds =
9428 resp->link_partner_pam4_adv_speeds;
9429 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9430 link_info->phy_ver[0] = resp->phy_maj;
9431 link_info->phy_ver[1] = resp->phy_min;
9432 link_info->phy_ver[2] = resp->phy_bld;
9433 link_info->media_type = resp->media_type;
9434 link_info->phy_type = resp->phy_type;
9435 link_info->transceiver = resp->xcvr_pkg_type;
9436 link_info->phy_addr = resp->eee_config_phy_addr &
9437 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9438 link_info->module_status = resp->module_status;
9440 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9441 struct ethtool_eee *eee = &bp->eee;
9444 eee->eee_active = 0;
9445 if (resp->eee_config_phy_addr &
9446 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9447 eee->eee_active = 1;
9448 fw_speeds = le16_to_cpu(
9449 resp->link_partner_adv_eee_link_speed_mask);
9450 eee->lp_advertised =
9451 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9454 /* Pull initial EEE config */
9455 if (!chng_link_state) {
9456 if (resp->eee_config_phy_addr &
9457 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9458 eee->eee_enabled = 1;
9460 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9462 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9464 if (resp->eee_config_phy_addr &
9465 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9468 eee->tx_lpi_enabled = 1;
9469 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9470 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9471 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9476 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9477 if (bp->hwrm_spec_code >= 0x10504) {
9478 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9479 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9481 /* TODO: need to add more logic to report VF link */
9482 if (chng_link_state) {
9483 if (link_info->phy_link_status == BNXT_LINK_LINK)
9484 link_info->link_up = 1;
9486 link_info->link_up = 0;
9487 if (link_up != link_info->link_up)
9488 bnxt_report_link(bp);
9490 /* always link down if not required to update link state */
9491 link_info->link_up = 0;
9493 hwrm_req_drop(bp, req);
9495 if (!BNXT_PHY_CFG_ABLE(bp))
9498 /* Check if any advertised speeds are no longer supported. The caller
9499 * holds the link_lock mutex, so we can modify link_info settings.
9501 if (bnxt_support_dropped(link_info->advertising,
9502 link_info->support_auto_speeds)) {
9503 link_info->advertising = link_info->support_auto_speeds;
9504 support_changed = true;
9506 if (bnxt_support_dropped(link_info->advertising_pam4,
9507 link_info->support_pam4_auto_speeds)) {
9508 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9509 support_changed = true;
9511 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9512 bnxt_hwrm_set_link_setting(bp, true, false);
9516 static void bnxt_get_port_module_status(struct bnxt *bp)
9518 struct bnxt_link_info *link_info = &bp->link_info;
9519 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9522 if (bnxt_update_link(bp, true))
9525 module_status = link_info->module_status;
9526 switch (module_status) {
9527 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9528 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9529 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9530 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9532 if (bp->hwrm_spec_code >= 0x10201) {
9533 netdev_warn(bp->dev, "Module part number %s\n",
9534 resp->phy_vendor_partnumber);
9536 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9537 netdev_warn(bp->dev, "TX is disabled\n");
9538 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9539 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
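/* Fill the pause fields of a HWRM_PORT_PHY_CFG request from the
 * requested flow-control settings, using the autoneg pause fields
 * when flow-control autoneg is enabled and the forced pause fields
 * otherwise.
 */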
9544 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9546 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9547 if (bp->hwrm_spec_code >= 0x10201)
9549 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9550 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9551 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9552 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9553 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9555 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9557 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9558 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9559 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9560 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9562 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9563 if (bp->hwrm_spec_code >= 0x10201) {
9564 req->auto_pause = req->force_pause;
9565 req->enables |= cpu_to_le32(
9566 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9571 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9573 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9574 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9575 if (bp->link_info.advertising) {
9576 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9577 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9579 if (bp->link_info.advertising_pam4) {
9581 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9582 req->auto_link_pam4_speed_mask =
9583 cpu_to_le16(bp->link_info.advertising_pam4);
9585 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9586 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9588 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9589 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9590 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9591 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9593 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9597 /* tell chimp that the setting takes effect immediately */
9598 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
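/* Send a HWRM_PORT_PHY_CFG request that updates the pause settings.
 * For forced pause, mirror the requested value into the cached
 * link_info since firmware will not generate a link change event.
 */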
9601 int bnxt_hwrm_set_pause(struct bnxt *bp)
9603 struct hwrm_port_phy_cfg_input *req;
9606 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9610 bnxt_hwrm_set_pause_common(bp, req);
9612 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9613 bp->link_info.force_link_chng)
9614 bnxt_hwrm_set_link_common(bp, req);
9616 rc = hwrm_req_send(bp, req);
9617 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9618 /* Since changing the pause setting doesn't trigger any link
9619 * change event, the driver needs to update the current pause
9620 * result upon successful return of the phy_cfg command
9622 bp->link_info.pause =
9623 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9624 bp->link_info.auto_pause_setting = 0;
9625 if (!bp->link_info.force_link_chng)
9626 bnxt_report_link(bp);
9628 bp->link_info.force_link_chng = false;
9632 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9633 struct hwrm_port_phy_cfg_input *req)
9635 struct ethtool_eee *eee = &bp->eee;
9637 if (eee->eee_enabled) {
9639 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9641 if (eee->tx_lpi_enabled)
9642 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9644 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9646 req->flags |= cpu_to_le32(flags);
9647 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9648 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9649 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9651 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9655 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9657 struct hwrm_port_phy_cfg_input *req;
9660 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9665 bnxt_hwrm_set_pause_common(bp, req);
9667 bnxt_hwrm_set_link_common(bp, req);
9670 bnxt_hwrm_set_eee(bp, req);
9671 return hwrm_req_send(bp, req);
9674 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9676 struct hwrm_port_phy_cfg_input *req;
9679 if (!BNXT_SINGLE_PF(bp))
9682 if (pci_num_vf(bp->pdev) &&
9683 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9686 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9690 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9691 return hwrm_req_send(bp, req);
9694 static int bnxt_fw_init_one(struct bnxt *bp);
9696 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9698 #ifdef CONFIG_TEE_BNXT_FW
9699 int rc = tee_bnxt_fw_load();
9702 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9706 netdev_err(bp->dev, "OP-TEE not supported\n");
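/* Poll firmware while its health register reports booting or
 * recovering.  If firmware never becomes healthy, log the status; if
 * it crashed with no master function present, attempt a reset via
 * OP-TEE.
 */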
9711 static int bnxt_try_recover_fw(struct bnxt *bp)
9713 if (bp->fw_health && bp->fw_health->status_reliable) {
9718 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9719 rc = bnxt_hwrm_poll(bp);
9720 if (!BNXT_FW_IS_BOOTING(sts) &&
9721 !BNXT_FW_IS_RECOVERING(sts))
9724 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9726 if (!BNXT_FW_IS_HEALTHY(sts)) {
9728 "Firmware not responding, status: 0x%x\n",
9732 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9733 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9734 return bnxt_fw_reset_via_optee(bp);
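/* Tell firmware that the driver interface is going up or down via
 * HWRM_FUNC_DRV_IF_CHANGE.  On the "up" transition, detect whether
 * firmware was reset or resources changed while the interface was
 * down, and if so rebuild context memory, interrupt mode and resource
 * reservations before proceeding.
 */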
9742 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9744 struct hwrm_func_drv_if_change_output *resp;
9745 struct hwrm_func_drv_if_change_input *req;
9746 bool fw_reset = !bp->irq_tbl;
9747 bool resc_reinit = false;
9751 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9754 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9759 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9760 resp = hwrm_req_hold(bp, req);
9762 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9763 while (retry < BNXT_FW_IF_RETRY) {
9764 rc = hwrm_req_send(bp, req);
9772 if (rc == -EAGAIN) {
9773 hwrm_req_drop(bp, req);
9776 flags = le32_to_cpu(resp->flags);
9778 rc = bnxt_try_recover_fw(bp);
9781 hwrm_req_drop(bp, req);
9786 bnxt_inv_fw_health_reg(bp);
9790 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9792 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9794 else if (bp->fw_health && !bp->fw_health->status_reliable)
9795 bnxt_try_map_fw_health_reg(bp);
9797 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9798 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9799 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9802 if (resc_reinit || fw_reset) {
9804 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9805 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9807 bnxt_free_ctx_mem(bp);
9811 rc = bnxt_fw_init_one(bp);
9813 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9814 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9817 bnxt_clear_int_mode(bp);
9818 rc = bnxt_init_int_mode(bp);
9820 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9821 netdev_err(bp->dev, "init int mode failed\n");
9825 if (BNXT_NEW_RM(bp)) {
9826 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9828 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9830 netdev_err(bp->dev, "resc_qcaps failed\n");
9832 hw_resc->resv_cp_rings = 0;
9833 hw_resc->resv_stat_ctxs = 0;
9834 hw_resc->resv_irqs = 0;
9835 hw_resc->resv_tx_rings = 0;
9836 hw_resc->resv_rx_rings = 0;
9837 hw_resc->resv_hw_ring_grps = 0;
9838 hw_resc->resv_vnics = 0;
9840 bp->tx_nr_rings = 0;
9841 bp->rx_nr_rings = 0;
9848 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9850 struct hwrm_port_led_qcaps_output *resp;
9851 struct hwrm_port_led_qcaps_input *req;
9852 struct bnxt_pf_info *pf = &bp->pf;
9856 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9859 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9863 req->port_id = cpu_to_le16(pf->port_id);
9864 resp = hwrm_req_hold(bp, req);
9865 rc = hwrm_req_send(bp, req);
9867 hwrm_req_drop(bp, req);
9870 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9873 bp->num_leds = resp->num_leds;
9874 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9876 for (i = 0; i < bp->num_leds; i++) {
9877 struct bnxt_led_info *led = &bp->leds[i];
9878 __le16 caps = led->led_state_caps;
9880 if (!led->led_group_id ||
9881 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9887 hwrm_req_drop(bp, req);
9891 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9893 struct hwrm_wol_filter_alloc_output *resp;
9894 struct hwrm_wol_filter_alloc_input *req;
9897 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
9901 req->port_id = cpu_to_le16(bp->pf.port_id);
9902 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9903 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9904 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
9906 resp = hwrm_req_hold(bp, req);
9907 rc = hwrm_req_send(bp, req);
9909 bp->wol_filter_id = resp->wol_filter_id;
9910 hwrm_req_drop(bp, req);
9914 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9916 struct hwrm_wol_filter_free_input *req;
9919 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
9923 req->port_id = cpu_to_le16(bp->pf.port_id);
9924 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9925 req->wol_filter_id = bp->wol_filter_id;
9927 return hwrm_req_send(bp, req);
9930 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9932 struct hwrm_wol_filter_qcfg_output *resp;
9933 struct hwrm_wol_filter_qcfg_input *req;
9934 u16 next_handle = 0;
9937 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
9941 req->port_id = cpu_to_le16(bp->pf.port_id);
9942 req->handle = cpu_to_le16(handle);
9943 resp = hwrm_req_hold(bp, req);
9944 rc = hwrm_req_send(bp, req);
9946 next_handle = le16_to_cpu(resp->next_handle);
9947 if (next_handle != 0) {
9948 if (resp->wol_type ==
9949 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9951 bp->wol_filter_id = resp->wol_filter_id;
9955 hwrm_req_drop(bp, req);
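/* Walk the firmware WOL filter table for this port and cache the id
 * of the magic-packet filter, if one is configured.
 */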
9959 static void bnxt_get_wol_settings(struct bnxt *bp)
9964 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9968 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9969 } while (handle && handle != 0xffff);
9972 #ifdef CONFIG_BNXT_HWMON
9973 static ssize_t bnxt_show_temp(struct device *dev,
9974 struct device_attribute *devattr, char *buf)
9976 struct hwrm_temp_monitor_query_output *resp;
9977 struct hwrm_temp_monitor_query_input *req;
9978 struct bnxt *bp = dev_get_drvdata(dev);
9982 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9985 resp = hwrm_req_hold(bp, req);
9986 rc = hwrm_req_send(bp, req);
9988 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9989 hwrm_req_drop(bp, req);
9994 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9996 static struct attribute *bnxt_attrs[] = {
9997 &sensor_dev_attr_temp1_input.dev_attr.attr,
10000 ATTRIBUTE_GROUPS(bnxt);
10002 static void bnxt_hwmon_close(struct bnxt *bp)
10004 if (bp->hwmon_dev) {
10005 hwmon_device_unregister(bp->hwmon_dev);
10006 bp->hwmon_dev = NULL;
10010 static void bnxt_hwmon_open(struct bnxt *bp)
10012 struct hwrm_temp_monitor_query_input *req;
10013 struct pci_dev *pdev = bp->pdev;
10016 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10018 rc = hwrm_req_send_silent(bp, req);
10019 if (rc == -EACCES || rc == -EOPNOTSUPP) {
10020 bnxt_hwmon_close(bp);
10027 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10028 DRV_MODULE_NAME, bp,
10030 if (IS_ERR(bp->hwmon_dev)) {
10031 bp->hwmon_dev = NULL;
10032 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10036 static void bnxt_hwmon_close(struct bnxt *bp)
10040 static void bnxt_hwmon_open(struct bnxt *bp)
10045 static bool bnxt_eee_config_ok(struct bnxt *bp)
10047 struct ethtool_eee *eee = &bp->eee;
10048 struct bnxt_link_info *link_info = &bp->link_info;
10050 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10053 if (eee->eee_enabled) {
10055 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10057 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10058 eee->eee_enabled = 0;
10061 if (eee->advertised & ~advertising) {
10062 eee->advertised = advertising & eee->supported;
10069 static int bnxt_update_phy_setting(struct bnxt *bp)
10072 bool update_link = false;
10073 bool update_pause = false;
10074 bool update_eee = false;
10075 struct bnxt_link_info *link_info = &bp->link_info;
10077 rc = bnxt_update_link(bp, true);
10079 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10083 if (!BNXT_SINGLE_PF(bp))
10086 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10087 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10088 link_info->req_flow_ctrl)
10089 update_pause = true;
10090 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10091 link_info->force_pause_setting != link_info->req_flow_ctrl)
10092 update_pause = true;
10093 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10094 if (BNXT_AUTO_MODE(link_info->auto_mode))
10095 update_link = true;
10096 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10097 link_info->req_link_speed != link_info->force_link_speed)
10098 update_link = true;
10099 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10100 link_info->req_link_speed != link_info->force_pam4_link_speed)
10101 update_link = true;
10102 if (link_info->req_duplex != link_info->duplex_setting)
10103 update_link = true;
10105 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10106 update_link = true;
10107 if (link_info->advertising != link_info->auto_link_speeds ||
10108 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10109 update_link = true;
10112 /* The last close may have shut down the link, so we need to call
10113 * PHY_CFG to bring it back up.
10115 if (!bp->link_info.link_up)
10116 update_link = true;
10118 if (!bnxt_eee_config_ok(bp))
10122 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10123 else if (update_pause)
10124 rc = bnxt_hwrm_set_pause(bp);
10126 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10134 /* Common routine to pre-map certain register blocks to different GRC windows.
10135 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10136 * in a PF and 3 windows in a VF can be customized to map in different register blocks.
10139 static void bnxt_preset_reg_win(struct bnxt *bp)
10142 /* CAG registers map to GRC window #4 */
10143 writel(BNXT_CAG_REG_BASE,
10144 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10148 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10150 static int bnxt_reinit_after_abort(struct bnxt *bp)
10154 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10157 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10160 rc = bnxt_fw_init_one(bp);
10162 bnxt_clear_int_mode(bp);
10163 rc = bnxt_init_int_mode(bp);
10165 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10166 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
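/* Core open path: reserve rings, allocate memory, set up NAPI and
 * IRQs, initialize the NIC, optionally re-apply PHY settings, then
 * enable interrupts and the TX path and start the periodic timer.
 */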
10172 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10176 bnxt_preset_reg_win(bp);
10177 netif_carrier_off(bp->dev);
10179 /* Reserve rings now if none were reserved at driver probe. */
10180 rc = bnxt_init_dflt_ring_mode(bp);
10182 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10186 rc = bnxt_reserve_rings(bp, irq_re_init);
10189 if ((bp->flags & BNXT_FLAG_RFS) &&
10190 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10191 /* disable RFS if falling back to INTA */
10192 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10193 bp->flags &= ~BNXT_FLAG_RFS;
10196 rc = bnxt_alloc_mem(bp, irq_re_init);
10198 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10199 goto open_err_free_mem;
10203 bnxt_init_napi(bp);
10204 rc = bnxt_request_irq(bp);
10206 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10211 rc = bnxt_init_nic(bp, irq_re_init);
10213 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10217 bnxt_enable_napi(bp);
10218 bnxt_debug_dev_init(bp);
10220 if (link_re_init) {
10221 mutex_lock(&bp->link_lock);
10222 rc = bnxt_update_phy_setting(bp);
10223 mutex_unlock(&bp->link_lock);
10225 netdev_warn(bp->dev, "failed to update phy settings\n");
10226 if (BNXT_SINGLE_PF(bp)) {
10227 bp->link_info.phy_retry = true;
10228 bp->link_info.phy_retry_expires =
10235 udp_tunnel_nic_reset_ntf(bp->dev);
10237 set_bit(BNXT_STATE_OPEN, &bp->state);
10238 bnxt_enable_int(bp);
10239 /* Enable TX queues */
10240 bnxt_tx_enable(bp);
10241 mod_timer(&bp->timer, jiffies + bp->current_interval);
10242 /* Poll link status and check SFP+ module status */
10243 mutex_lock(&bp->link_lock);
10244 bnxt_get_port_module_status(bp);
10245 mutex_unlock(&bp->link_lock);
10247 /* VF-reps may need to be re-opened after the PF is re-opened */
10249 bnxt_vf_reps_open(bp);
10256 bnxt_free_skbs(bp);
10258 bnxt_free_mem(bp, true);
10262 /* rtnl_lock held */
10263 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10267 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10270 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10272 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10273 dev_close(bp->dev);
10278 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10279 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline self tests.
10282 int bnxt_half_open_nic(struct bnxt *bp)
10286 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10287 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10289 goto half_open_err;
10292 rc = bnxt_alloc_mem(bp, false);
10294 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10295 goto half_open_err;
10297 rc = bnxt_init_nic(bp, false);
10299 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10300 goto half_open_err;
10305 bnxt_free_skbs(bp);
10306 bnxt_free_mem(bp, false);
10307 dev_close(bp->dev);
10311 /* rtnl_lock held, this call can only be made after a previous successful
10312 * call to bnxt_half_open_nic().
10314 void bnxt_half_close_nic(struct bnxt *bp)
10316 bnxt_hwrm_resource_free(bp, false, false);
10317 bnxt_free_skbs(bp);
10318 bnxt_free_mem(bp, false);
10321 static void bnxt_reenable_sriov(struct bnxt *bp)
10324 struct bnxt_pf_info *pf = &bp->pf;
10325 int n = pf->active_vfs;
10328 bnxt_cfg_hw_sriov(bp, &n, true);
10332 static int bnxt_open(struct net_device *dev)
10334 struct bnxt *bp = netdev_priv(dev);
10337 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10338 rc = bnxt_reinit_after_abort(bp);
10341 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10343 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10348 rc = bnxt_hwrm_if_change(bp, true);
10352 rc = __bnxt_open_nic(bp, true, true);
10354 bnxt_hwrm_if_change(bp, false);
10356 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10357 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10358 bnxt_ulp_start(bp, 0);
10359 bnxt_reenable_sriov(bp);
10362 bnxt_hwmon_open(bp);
10368 static bool bnxt_drv_busy(struct bnxt *bp)
10370 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10371 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10374 static void bnxt_get_ring_stats(struct bnxt *bp,
10375 struct rtnl_link_stats64 *stats);
10377 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10380 /* Close the VF-reps before closing PF */
10382 bnxt_vf_reps_close(bp);
10384 /* Change device state to avoid TX queue wake-ups */
10385 bnxt_tx_disable(bp);
10387 clear_bit(BNXT_STATE_OPEN, &bp->state);
10388 smp_mb__after_atomic();
10389 while (bnxt_drv_busy(bp))
10392 /* Flush rings and disable interrupts */
10393 bnxt_shutdown_nic(bp, irq_re_init);
10395 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10397 bnxt_debug_dev_exit(bp);
10398 bnxt_disable_napi(bp);
10399 del_timer_sync(&bp->timer);
10400 bnxt_free_skbs(bp);
10402 /* Save ring stats before shutdown */
10403 if (bp->bnapi && irq_re_init)
10404 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10409 bnxt_free_mem(bp, irq_re_init);
10412 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10416 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10417 /* If we get here, it means firmware reset is in progress
10418 * while we are trying to close. We can safely proceed with
10419 * the close because we are holding rtnl_lock(). Some firmware
10420 * messages may fail as we proceed to close. We set the
10421 * ABORT_ERR flag here so that the FW reset thread will later
10422 * abort when it gets the rtnl_lock() and sees the flag.
10424 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10425 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10428 #ifdef CONFIG_BNXT_SRIOV
10429 if (bp->sriov_cfg) {
10430 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10432 BNXT_SRIOV_CFG_WAIT_TMO);
10434 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10437 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10441 static int bnxt_close(struct net_device *dev)
10443 struct bnxt *bp = netdev_priv(dev);
10445 bnxt_hwmon_close(bp);
10446 bnxt_close_nic(bp, true, true);
10447 bnxt_hwrm_shutdown_link(bp);
10448 bnxt_hwrm_if_change(bp, false);
10452 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10455 struct hwrm_port_phy_mdio_read_output *resp;
10456 struct hwrm_port_phy_mdio_read_input *req;
10459 if (bp->hwrm_spec_code < 0x10a00)
10460 return -EOPNOTSUPP;
10462 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10466 req->port_id = cpu_to_le16(bp->pf.port_id);
10467 req->phy_addr = phy_addr;
10468 req->reg_addr = cpu_to_le16(reg & 0x1f);
10469 if (mdio_phy_id_is_c45(phy_addr)) {
10470 req->cl45_mdio = 1;
10471 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10472 req->dev_addr = mdio_phy_id_devad(phy_addr);
10473 req->reg_addr = cpu_to_le16(reg);
10476 resp = hwrm_req_hold(bp, req);
10477 rc = hwrm_req_send(bp, req);
10479 *val = le16_to_cpu(resp->reg_data);
10480 hwrm_req_drop(bp, req);
10484 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10487 struct hwrm_port_phy_mdio_write_input *req;
10490 if (bp->hwrm_spec_code < 0x10a00)
10491 return -EOPNOTSUPP;
10493 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10497 req->port_id = cpu_to_le16(bp->pf.port_id);
10498 req->phy_addr = phy_addr;
10499 req->reg_addr = cpu_to_le16(reg & 0x1f);
10500 if (mdio_phy_id_is_c45(phy_addr)) {
10501 req->cl45_mdio = 1;
10502 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10503 req->dev_addr = mdio_phy_id_devad(phy_addr);
10504 req->reg_addr = cpu_to_le16(reg);
10506 req->reg_data = cpu_to_le16(val);
10508 return hwrm_req_send(bp, req);
10511 /* rtnl_lock held */
10512 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10514 struct mii_ioctl_data *mdio = if_mii(ifr);
10515 struct bnxt *bp = netdev_priv(dev);
10520 mdio->phy_id = bp->link_info.phy_addr;
10523 case SIOCGMIIREG: {
10524 u16 mii_regval = 0;
10526 if (!netif_running(dev))
10529 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10531 mdio->val_out = mii_regval;
10536 if (!netif_running(dev))
10539 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10542 case SIOCSHWTSTAMP:
10543 return bnxt_hwtstamp_set(dev, ifr);
10545 case SIOCGHWTSTAMP:
10546 return bnxt_hwtstamp_get(dev, ifr);
10552 return -EOPNOTSUPP;
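/* Accumulate the per-completion-ring software counters into the
 * standard rtnl_link_stats64 structure.
 */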
10555 static void bnxt_get_ring_stats(struct bnxt *bp,
10556 struct rtnl_link_stats64 *stats)
10560 for (i = 0; i < bp->cp_nr_rings; i++) {
10561 struct bnxt_napi *bnapi = bp->bnapi[i];
10562 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10563 u64 *sw = cpr->stats.sw_stats;
10565 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10566 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10567 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10569 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10570 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10571 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10573 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10574 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10575 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10577 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10578 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10579 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10581 stats->rx_missed_errors +=
10582 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10584 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10586 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10588 stats->rx_dropped +=
10589 cpr->sw_stats.rx.rx_netpoll_discards +
10590 cpr->sw_stats.rx.rx_oom_discards;
10594 static void bnxt_add_prev_stats(struct bnxt *bp,
10595 struct rtnl_link_stats64 *stats)
10597 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10599 stats->rx_packets += prev_stats->rx_packets;
10600 stats->tx_packets += prev_stats->tx_packets;
10601 stats->rx_bytes += prev_stats->rx_bytes;
10602 stats->tx_bytes += prev_stats->tx_bytes;
10603 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10604 stats->multicast += prev_stats->multicast;
10605 stats->rx_dropped += prev_stats->rx_dropped;
10606 stats->tx_dropped += prev_stats->tx_dropped;
10610 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10612 struct bnxt *bp = netdev_priv(dev);
10614 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10615 /* Make sure bnxt_close_nic() sees that we are reading stats before
10616 * we check the BNXT_STATE_OPEN flag.
10618 smp_mb__after_atomic();
10619 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10620 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10621 *stats = bp->net_stats_prev;
10625 bnxt_get_ring_stats(bp, stats);
10626 bnxt_add_prev_stats(bp, stats);
10628 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10629 u64 *rx = bp->port_stats.sw_stats;
10630 u64 *tx = bp->port_stats.sw_stats +
10631 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10633 stats->rx_crc_errors =
10634 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10635 stats->rx_frame_errors =
10636 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10637 stats->rx_length_errors =
10638 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10639 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10640 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10642 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10643 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10644 stats->collisions =
10645 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10646 stats->tx_fifo_errors =
10647 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10648 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10650 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10653 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10655 struct net_device *dev = bp->dev;
10656 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10657 struct netdev_hw_addr *ha;
10660 bool update = false;
10663 netdev_for_each_mc_addr(ha, dev) {
10664 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10665 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10666 vnic->mc_list_count = 0;
10670 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10671 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10678 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10680 if (mc_count != vnic->mc_list_count) {
10681 vnic->mc_list_count = mc_count;
10687 static bool bnxt_uc_list_updated(struct bnxt *bp)
10689 struct net_device *dev = bp->dev;
10690 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10691 struct netdev_hw_addr *ha;
10694 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10697 netdev_for_each_uc_addr(ha, dev) {
10698 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10706 static void bnxt_set_rx_mode(struct net_device *dev)
10708 struct bnxt *bp = netdev_priv(dev);
10709 struct bnxt_vnic_info *vnic;
10710 bool mc_update = false;
10714 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10717 vnic = &bp->vnic_info[0];
10718 mask = vnic->rx_mask;
10719 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10720 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10721 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10722 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10724 if (dev->flags & IFF_PROMISC)
10725 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10727 uc_update = bnxt_uc_list_updated(bp);
10729 if (dev->flags & IFF_BROADCAST)
10730 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10731 if (dev->flags & IFF_ALLMULTI) {
10732 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10733 vnic->mc_list_count = 0;
10735 mc_update = bnxt_mc_list_updated(bp, &mask);
10738 if (mask != vnic->rx_mask || uc_update || mc_update) {
10739 vnic->rx_mask = mask;
10741 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10742 bnxt_queue_sp_work(bp);
10746 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10748 struct net_device *dev = bp->dev;
10749 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10750 struct hwrm_cfa_l2_filter_free_input *req;
10751 struct netdev_hw_addr *ha;
10752 int i, off = 0, rc;
10755 netif_addr_lock_bh(dev);
10756 uc_update = bnxt_uc_list_updated(bp);
10757 netif_addr_unlock_bh(dev);
10762 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10765 hwrm_req_hold(bp, req);
10766 for (i = 1; i < vnic->uc_filter_count; i++) {
10767 req->l2_filter_id = vnic->fw_l2_filter_id[i];
10769 rc = hwrm_req_send(bp, req);
10771 hwrm_req_drop(bp, req);
10773 vnic->uc_filter_count = 1;
10775 netif_addr_lock_bh(dev);
10776 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10777 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10779 netdev_for_each_uc_addr(ha, dev) {
10780 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10782 vnic->uc_filter_count++;
10785 netif_addr_unlock_bh(dev);
10787 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10788 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10790 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10792 vnic->uc_filter_count = i;
10798 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10799 !bnxt_promisc_ok(bp))
10800 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10801 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10802 if (rc && vnic->mc_list_count) {
10803 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10805 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10806 vnic->mc_list_count = 0;
10807 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10810 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10816 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10818 #ifdef CONFIG_BNXT_SRIOV
10819 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10820 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10822 /* No minimum rings were provisioned by the PF. Don't
10823 * reserve rings by default when device is down.
10825 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10828 if (!netif_running(bp->dev))
10835 /* If the chip and firmware support RFS */
10836 static bool bnxt_rfs_supported(struct bnxt *bp)
10838 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10839 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10843 /* 212 firmware is broken for aRFS */
10844 if (BNXT_FW_MAJ(bp) == 212)
10846 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10848 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10853 /* If runtime conditions support RFS */
10854 static bool bnxt_rfs_capable(struct bnxt *bp)
10856 #ifdef CONFIG_RFS_ACCEL
10857 int vnics, max_vnics, max_rss_ctxs;
10859 if (bp->flags & BNXT_FLAG_CHIP_P5)
10860 return bnxt_rfs_supported(bp);
10861 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10864 vnics = 1 + bp->rx_nr_rings;
10865 max_vnics = bnxt_get_max_func_vnics(bp);
10866 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10868 /* RSS contexts not a limiting factor */
10869 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10870 max_rss_ctxs = max_vnics;
10871 if (vnics > max_vnics || vnics > max_rss_ctxs) {
10872 if (bp->rx_nr_rings > 1)
10873 netdev_warn(bp->dev,
10874 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10875 min(max_rss_ctxs - 1, max_vnics - 1));
10879 if (!BNXT_NEW_RM(bp))
10882 if (vnics == bp->hw_resc.resv_vnics)
10885 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10886 if (vnics <= bp->hw_resc.resv_vnics)
10889 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10890 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
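/* ndo_fix_features handler: drop NTUPLE when aRFS cannot be supported,
 * keep GRO_HW and LRO mutually exclusive, and force the CTAG and STAG
 * RX VLAN acceleration bits to be enabled or disabled together.
 */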
10897 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10898 netdev_features_t features)
10900 struct bnxt *bp = netdev_priv(dev);
10901 netdev_features_t vlan_features;
10903 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10904 features &= ~NETIF_F_NTUPLE;
10906 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10907 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10909 if (!(features & NETIF_F_GRO))
10910 features &= ~NETIF_F_GRO_HW;
10912 if (features & NETIF_F_GRO_HW)
10913 features &= ~NETIF_F_LRO;
10915 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10916 * turned on or off together.
10918 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10919 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10920 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10921 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10922 else if (vlan_features)
10923 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10925 #ifdef CONFIG_BNXT_SRIOV
10926 if (BNXT_VF(bp) && bp->vf.vlan)
10927 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10932 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10934 struct bnxt *bp = netdev_priv(dev);
10935 u32 flags = bp->flags;
10938 bool re_init = false;
10939 bool update_tpa = false;
10941 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10942 if (features & NETIF_F_GRO_HW)
10943 flags |= BNXT_FLAG_GRO;
10944 else if (features & NETIF_F_LRO)
10945 flags |= BNXT_FLAG_LRO;
10947 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10948 flags &= ~BNXT_FLAG_TPA;
10950 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10951 flags |= BNXT_FLAG_STRIP_VLAN;
10953 if (features & NETIF_F_NTUPLE)
10954 flags |= BNXT_FLAG_RFS;
10956 changes = flags ^ bp->flags;
10957 if (changes & BNXT_FLAG_TPA) {
10959 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10960 (flags & BNXT_FLAG_TPA) == 0 ||
10961 (bp->flags & BNXT_FLAG_CHIP_P5))
10965 if (changes & ~BNXT_FLAG_TPA)
10968 if (flags != bp->flags) {
10969 u32 old_flags = bp->flags;
10971 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10974 bnxt_set_ring_params(bp);
10979 bnxt_close_nic(bp, false, false);
10982 bnxt_set_ring_params(bp);
10984 return bnxt_open_nic(bp, false, false);
10988 rc = bnxt_set_tpa(bp,
10989 (flags & BNXT_FLAG_TPA) ?
10992 bp->flags = old_flags;
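/* Returns true if the IPv6 packet at the given network offset is
 * acceptable for checksum/GSO offload: its extension headers pass the
 * limits checked below and the upper protocol is TCP or UDP (for
 * encapsulated packets the caller checks the inner protocol).
 */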
10998 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11001 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11006 /* Check that there are at most 2 IPv6 extension headers, no
11007 * fragment header, and each is <= 64 bytes.
11009 start = nw_off + sizeof(*ip6h);
11010 nexthdr = &ip6h->nexthdr;
11011 while (ipv6_ext_hdr(*nexthdr)) {
11012 struct ipv6_opt_hdr *hp;
11015 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11016 *nexthdr == NEXTHDR_FRAGMENT)
11018 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11019 skb_headlen(skb), NULL);
11022 if (*nexthdr == NEXTHDR_AUTH)
11023 hdrlen = ipv6_authlen(hp);
11025 hdrlen = ipv6_optlen(hp);
11029 nexthdr = &hp->nexthdr;
11034 /* Caller will check inner protocol */
11035 if (skb->encapsulation) {
11041 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11042 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11045 /* For UDP, we can only handle 1 VXLAN port and 1 Geneve port. */
11046 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11048 struct udphdr *uh = udp_hdr(skb);
11049 __be16 udp_port = uh->dest;
11051 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11053 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11054 struct ethhdr *eh = inner_eth_hdr(skb);
11056 switch (eh->h_proto) {
11057 case htons(ETH_P_IP):
11059 case htons(ETH_P_IPV6):
11060 return bnxt_exthdr_check(bp, skb,
11061 skb_inner_network_offset(skb),
11068 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11070 switch (l4_proto) {
11072 return bnxt_udp_tunl_check(bp, skb);
11075 case IPPROTO_GRE: {
11076 switch (skb->inner_protocol) {
11079 case htons(ETH_P_IP):
11081 case htons(ETH_P_IPV6):
11086 /* Check ext headers of inner ipv6 */
11087 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11093 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11094 struct net_device *dev,
11095 netdev_features_t features)
11097 struct bnxt *bp = netdev_priv(dev);
11100 features = vlan_features_check(skb, features);
11101 switch (vlan_get_protocol(skb)) {
11102 case htons(ETH_P_IP):
11103 if (!skb->encapsulation)
11105 l4_proto = &ip_hdr(skb)->protocol;
11106 if (bnxt_tunl_check(bp, skb, *l4_proto))
11109 case htons(ETH_P_IPV6):
11110 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11113 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11117 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11120 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11123 struct hwrm_dbg_read_direct_output *resp;
11124 struct hwrm_dbg_read_direct_input *req;
11125 __le32 *dbg_reg_buf;
11126 dma_addr_t mapping;
11129 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11133 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11135 if (!dbg_reg_buf) {
11137 goto dbg_rd_reg_exit;
11140 req->host_dest_addr = cpu_to_le64(mapping);
11142 resp = hwrm_req_hold(bp, req);
11143 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11144 req->read_len32 = cpu_to_le32(num_words);
11146 rc = hwrm_req_send(bp, req);
11147 if (rc || resp->error_code) {
11149 goto dbg_rd_reg_exit;
11151 for (i = 0; i < num_words; i++)
11152 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11155 hwrm_req_drop(bp, req);
11159 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11160 u32 ring_id, u32 *prod, u32 *cons)
11162 struct hwrm_dbg_ring_info_get_output *resp;
11163 struct hwrm_dbg_ring_info_get_input *req;
11166 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11170 req->ring_type = ring_type;
11171 req->fw_ring_id = cpu_to_le32(ring_id);
11172 resp = hwrm_req_hold(bp, req);
11173 rc = hwrm_req_send(bp, req);
11175 *prod = le32_to_cpu(resp->producer_index);
11176 *cons = le32_to_cpu(resp->consumer_index);
11178 hwrm_req_drop(bp, req);
11182 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11184 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11185 int i = bnapi->index;
11190 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11191 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11195 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11197 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11198 int i = bnapi->index;
11203 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11204 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11205 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11206 rxr->rx_sw_agg_prod);
11209 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11211 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11212 int i = bnapi->index;
11214 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11215 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11218 static void bnxt_dbg_dump_states(struct bnxt *bp)
11221 struct bnxt_napi *bnapi;
11223 for (i = 0; i < bp->cp_nr_rings; i++) {
11224 bnapi = bp->bnapi[i];
11225 if (netif_msg_drv(bp)) {
11226 bnxt_dump_tx_sw_state(bnapi);
11227 bnxt_dump_rx_sw_state(bnapi);
11228 bnxt_dump_cp_sw_state(bnapi);
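/* Ask firmware to reset a single RX ring group so the ring can be
 * re-initialized in place; the caller falls back to a full reset if
 * firmware does not support this.
 */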
11233 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11235 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11236 struct hwrm_ring_reset_input *req;
11237 struct bnxt_napi *bnapi = rxr->bnapi;
11238 struct bnxt_cp_ring_info *cpr;
11242 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11246 cpr = &bnapi->cp_ring;
11247 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11248 req->cmpl_ring = cpu_to_le16(cp_ring_id);
11249 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11250 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11251 return hwrm_req_send_silent(bp, req);
11254 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11257 bnxt_dbg_dump_states(bp);
11258 if (netif_running(bp->dev)) {
11262 bnxt_close_nic(bp, false, false);
11263 bnxt_open_nic(bp, false, false);
11266 bnxt_close_nic(bp, true, false);
11267 rc = bnxt_open_nic(bp, true, false);
11268 bnxt_ulp_start(bp, rc);
11273 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11275 struct bnxt *bp = netdev_priv(dev);
11277 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11278 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11279 bnxt_queue_sp_work(bp);
11282 static void bnxt_fw_health_check(struct bnxt *bp)
11284 struct bnxt_fw_health *fw_health = bp->fw_health;
11287 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11290 /* Make sure it is enabled before checking the tmr_counter. */
11292 if (fw_health->tmr_counter) {
11293 fw_health->tmr_counter--;
11297 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11298 if (val == fw_health->last_fw_heartbeat)
11301 fw_health->last_fw_heartbeat = val;
11303 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11304 if (val != fw_health->last_fw_reset_cnt)
11307 fw_health->tmr_counter = fw_health->tmr_multiplier;
11311 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11312 bnxt_queue_sp_work(bp);
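/* Periodic driver timer: checks firmware health, schedules stats and
 * flow-counter updates, retries PHY configuration when needed, and
 * re-arms itself while the device is open.
 */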
11315 static void bnxt_timer(struct timer_list *t)
11317 struct bnxt *bp = from_timer(bp, t, timer);
11318 struct net_device *dev = bp->dev;
11320 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11323 if (atomic_read(&bp->intr_sem) != 0)
11324 goto bnxt_restart_timer;
11326 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11327 bnxt_fw_health_check(bp);
11329 if (bp->link_info.link_up && bp->stats_coal_ticks) {
11330 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11331 bnxt_queue_sp_work(bp);
11334 if (bnxt_tc_flower_enabled(bp)) {
11335 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11336 bnxt_queue_sp_work(bp);
11339 #ifdef CONFIG_RFS_ACCEL
11340 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11341 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11342 bnxt_queue_sp_work(bp);
11344 #endif /*CONFIG_RFS_ACCEL*/
11346 if (bp->link_info.phy_retry) {
11347 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11348 bp->link_info.phy_retry = false;
11349 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11351 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11352 bnxt_queue_sp_work(bp);
11356 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11357 netif_carrier_ok(dev)) {
11358 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11359 bnxt_queue_sp_work(bp);
11361 bnxt_restart_timer:
11362 mod_timer(&bp->timer, jiffies + bp->current_interval);
11365 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11367 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11368 * set. If the device is being closed, bnxt_close() may be holding
11369 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11370 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11372 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11376 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11378 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11382 /* Only called from bnxt_sp_task() */
11383 static void bnxt_reset(struct bnxt *bp, bool silent)
11385 bnxt_rtnl_lock_sp(bp);
11386 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11387 bnxt_reset_task(bp, silent);
11388 bnxt_rtnl_unlock_sp(bp);
11391 /* Only called from bnxt_sp_task() */
11392 static void bnxt_rx_ring_reset(struct bnxt *bp)
11396 bnxt_rtnl_lock_sp(bp);
11397 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11398 bnxt_rtnl_unlock_sp(bp);
11401 /* Disable and flush TPA before resetting the RX ring */
11402 if (bp->flags & BNXT_FLAG_TPA)
11403 bnxt_set_tpa(bp, false);
11404 for (i = 0; i < bp->rx_nr_rings; i++) {
11405 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11406 struct bnxt_cp_ring_info *cpr;
11409 if (!rxr->bnapi->in_reset)
11412 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11414 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11415 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11417 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11419 bnxt_reset_task(bp, true);
11422 bnxt_free_one_rx_ring_skbs(bp, i);
11424 rxr->rx_agg_prod = 0;
11425 rxr->rx_sw_agg_prod = 0;
11426 rxr->rx_next_cons = 0;
11427 rxr->bnapi->in_reset = false;
11428 bnxt_alloc_one_rx_ring(bp, i);
11429 cpr = &rxr->bnapi->cp_ring;
11430 cpr->sw_stats.rx.rx_resets++;
11431 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11432 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11433 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11435 if (bp->flags & BNXT_FLAG_TPA)
11436 bnxt_set_tpa(bp, true);
11437 bnxt_rtnl_unlock_sp(bp);
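/* Tear the device down ahead of a firmware reset.  In the fatal-error
 * case, quiesce the rings and disable the PCI device first to prevent
 * bad DMAs; then close the NIC, clear the interrupt mode, unregister
 * the driver from firmware and free context memory.
 */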
11440 static void bnxt_fw_reset_close(struct bnxt *bp)
11443 /* When firmware is in a fatal state, quiesce the device and disable
11444 * bus master to prevent any potential bad DMAs before freeing kernel memory.
11447 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11450 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11452 bp->fw_reset_min_dsecs = 0;
11453 bnxt_tx_disable(bp);
11454 bnxt_disable_napi(bp);
11455 bnxt_disable_int_sync(bp);
11457 bnxt_clear_int_mode(bp);
11458 pci_disable_device(bp->pdev);
11460 __bnxt_close_nic(bp, true, false);
11461 bnxt_vf_reps_free(bp);
11462 bnxt_clear_int_mode(bp);
11463 bnxt_hwrm_func_drv_unrgtr(bp);
11464 if (pci_is_enabled(bp->pdev))
11465 pci_disable_device(bp->pdev);
11466 bnxt_free_ctx_mem(bp);
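/* Sample the firmware heartbeat and reset-counter registers; firmware is
 * considered healthy only if the reset counter changed while the heartbeat
 * kept running.
 */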
11471 static bool is_bnxt_fw_ok(struct bnxt *bp)
11473 struct bnxt_fw_health *fw_health = bp->fw_health;
11474 bool no_heartbeat = false, has_reset = false;
11477 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11478 if (val == fw_health->last_fw_heartbeat)
11479 no_heartbeat = true;
11481 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11482 if (val != fw_health->last_fw_reset_cnt)
11485 if (!no_heartbeat && has_reset)
11491 /* rtnl_lock is acquired before calling this function */
11492 static void bnxt_force_fw_reset(struct bnxt *bp)
11494 struct bnxt_fw_health *fw_health = bp->fw_health;
11495 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11498 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11499 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11503 spin_lock_bh(&ptp->ptp_lock);
11504 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11505 spin_unlock_bh(&ptp->ptp_lock);
11507 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11509 bnxt_fw_reset_close(bp);
11510 wait_dsecs = fw_health->master_func_wait_dsecs;
11511 if (fw_health->master) {
11512 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11514 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11516 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11517 wait_dsecs = fw_health->normal_func_wait_dsecs;
11518 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11521 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11522 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11523 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11526 void bnxt_fw_exception(struct bnxt *bp)
11528 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11529 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11530 bnxt_rtnl_lock_sp(bp);
11531 bnxt_force_fw_reset(bp);
11532 bnxt_rtnl_unlock_sp(bp);
11535 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
 * 0 if no VFs are registered, or a negative error code if the firmware query fails.
 */
11538 static int bnxt_get_registered_vfs(struct bnxt *bp)
11540 #ifdef CONFIG_BNXT_SRIOV
11546 rc = bnxt_hwrm_func_qcfg(bp);
11548 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11551 if (bp->pf.registered_vfs)
11552 return bp->pf.registered_vfs;
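/* Entry point for a coordinated firmware reset.  If VFs are still
 * registered and the error is not fatal, the reset state machine first
 * polls for them to unregister; otherwise the NIC is quiesced and
 * bnxt_fw_reset_task() is scheduled to walk the reset states.
 */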
11559 void bnxt_fw_reset(struct bnxt *bp)
11561 bnxt_rtnl_lock_sp(bp);
11562 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11563 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11564 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11568 spin_lock_bh(&ptp->ptp_lock);
11569 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11570 spin_unlock_bh(&ptp->ptp_lock);
11572 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11574 if (bp->pf.active_vfs &&
11575 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11576 n = bnxt_get_registered_vfs(bp);
11578 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11580 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11581 dev_close(bp->dev);
11582 goto fw_reset_exit;
11583 } else if (n > 0) {
11584 u16 vf_tmo_dsecs = n * 10;
11586 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11587 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11588 bp->fw_reset_state =
11589 BNXT_FW_RESET_STATE_POLL_VF;
11590 bnxt_queue_fw_reset_work(bp, HZ / 10);
11591 goto fw_reset_exit;
11593 bnxt_fw_reset_close(bp);
11594 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11595 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11598 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11599 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11601 bnxt_queue_fw_reset_work(bp, tmo);
11604 bnxt_rtnl_unlock_sp(bp);
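/* P5-specific check for completion rings that have pending work but whose
 * consumer index has not advanced since the last check; such rings are
 * counted as missed IRQs and their state is queried from firmware for
 * debugging.
 */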
11607 static void bnxt_chk_missed_irq(struct bnxt *bp)
11611 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11614 for (i = 0; i < bp->cp_nr_rings; i++) {
11615 struct bnxt_napi *bnapi = bp->bnapi[i];
11616 struct bnxt_cp_ring_info *cpr;
11623 cpr = &bnapi->cp_ring;
11624 for (j = 0; j < 2; j++) {
11625 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11628 if (!cpr2 || cpr2->has_more_work ||
11629 !bnxt_has_work(bp, cpr2))
11632 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11633 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11636 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11637 bnxt_dbg_hwrm_ring_info_get(bp,
11638 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11639 fw_ring_id, &val[0], &val[1]);
11640 cpr->sw_stats.cmn.missed_irqs++;
11645 static void bnxt_cfg_ntp_filters(struct bnxt *);
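/* Derive the requested link settings reported through ethtool (autoneg,
 * speed, PAM4 signalling and flow control) from the current firmware PHY
 * configuration.
 */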
11647 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11649 struct bnxt_link_info *link_info = &bp->link_info;
11651 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11652 link_info->autoneg = BNXT_AUTONEG_SPEED;
11653 if (bp->hwrm_spec_code >= 0x10201) {
11654 if (link_info->auto_pause_setting &
11655 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11656 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11658 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11660 link_info->advertising = link_info->auto_link_speeds;
11661 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11663 link_info->req_link_speed = link_info->force_link_speed;
11664 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11665 if (link_info->force_pam4_link_speed) {
11666 link_info->req_link_speed =
11667 link_info->force_pam4_link_speed;
11668 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11670 link_info->req_duplex = link_info->duplex_setting;
11672 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11673 link_info->req_flow_ctrl =
11674 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11676 link_info->req_flow_ctrl = link_info->force_pause_setting;
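/* Answer a firmware echo request by sending HWRM_FUNC_ECHO_RESPONSE
 * carrying back the request data saved in bp->fw_health.
 */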
11679 static void bnxt_fw_echo_reply(struct bnxt *bp)
11681 struct bnxt_fw_health *fw_health = bp->fw_health;
11682 struct hwrm_func_echo_response_input *req;
11685 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11688 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11689 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11690 hwrm_req_send(bp, req);
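/* Slow-path work handler.  Runs with BNXT_STATE_IN_SP_TASK set and services
 * the sp_event bits (RX mode, ntuple filters, stats, link changes, etc.).
 * The reset handlers are called last because they clear
 * BNXT_STATE_IN_SP_TASK themselves via bnxt_rtnl_lock_sp().
 */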
11693 static void bnxt_sp_task(struct work_struct *work)
11695 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11697 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11698 smp_mb__after_atomic();
11699 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11700 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11704 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11705 bnxt_cfg_rx_mode(bp);
11707 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11708 bnxt_cfg_ntp_filters(bp);
11709 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11710 bnxt_hwrm_exec_fwd_req(bp);
11711 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11712 bnxt_hwrm_port_qstats(bp, 0);
11713 bnxt_hwrm_port_qstats_ext(bp, 0);
11714 bnxt_accumulate_all_stats(bp);
11717 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11720 mutex_lock(&bp->link_lock);
11721 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11723 bnxt_hwrm_phy_qcaps(bp);
11725 rc = bnxt_update_link(bp, true);
11727 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11730 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11732 bnxt_init_ethtool_link_settings(bp);
11733 mutex_unlock(&bp->link_lock);
11735 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11738 mutex_lock(&bp->link_lock);
11739 rc = bnxt_update_phy_setting(bp);
11740 mutex_unlock(&bp->link_lock);
11742 netdev_warn(bp->dev, "update phy settings retry failed\n");
11744 bp->link_info.phy_retry = false;
11745 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11748 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11749 mutex_lock(&bp->link_lock);
11750 bnxt_get_port_module_status(bp);
11751 mutex_unlock(&bp->link_lock);
11754 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11755 bnxt_tc_flow_stats_work(bp);
11757 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11758 bnxt_chk_missed_irq(bp);
11760 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11761 bnxt_fw_echo_reply(bp);
11763 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
11764 * must be the last functions to be called before exiting.
11766 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11767 bnxt_reset(bp, false);
11769 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11770 bnxt_reset(bp, true);
11772 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11773 bnxt_rx_ring_reset(bp);
11775 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11776 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11778 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11779 if (!is_bnxt_fw_ok(bp))
11780 bnxt_devlink_health_report(bp,
11781 BNXT_FW_EXCEPTION_SP_EVENT);
11784 smp_mb__before_atomic();
11785 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
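/* Check whether a proposed TX/RX/TC/XDP ring configuration fits within the
 * resources granted by firmware before committing to it (e.g. before
 * applying a TC/MQPRIO change).
 */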
11788 /* Under rtnl_lock */
11789 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11792 int max_rx, max_tx, tx_sets = 1;
11793 int tx_rings_needed, stats;
11800 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11807 tx_rings_needed = tx * tx_sets + tx_xdp;
11808 if (max_tx < tx_rings_needed)
11812 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11815 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11817 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11819 if (BNXT_NEW_RM(bp)) {
11820 cp += bnxt_get_ulp_msix_num(bp);
11821 stats += bnxt_get_ulp_stat_ctxs(bp);
11823 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11827 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11830 pci_iounmap(pdev, bp->bar2);
11835 pci_iounmap(pdev, bp->bar1);
11840 pci_iounmap(pdev, bp->bar0);
11845 static void bnxt_cleanup_pci(struct bnxt *bp)
11847 bnxt_unmap_bars(bp, bp->pdev);
11848 pci_release_regions(bp->pdev);
11849 if (pci_is_enabled(bp->pdev))
11850 pci_disable_device(bp->pdev);
11853 static void bnxt_init_dflt_coal(struct bnxt *bp)
11855 struct bnxt_coal *coal;
11857 /* Tick values in microseconds.
11858 * 1 coal_buf x bufs_per_record = 1 completion record.
11860 coal = &bp->rx_coal;
11861 coal->coal_ticks = 10;
11862 coal->coal_bufs = 30;
11863 coal->coal_ticks_irq = 1;
11864 coal->coal_bufs_irq = 2;
11865 coal->idle_thresh = 50;
11866 coal->bufs_per_record = 2;
11867 coal->budget = 64; /* NAPI budget */
11869 coal = &bp->tx_coal;
11870 coal->coal_ticks = 28;
11871 coal->coal_bufs = 30;
11872 coal->coal_ticks_irq = 2;
11873 coal->coal_bufs_irq = 2;
11874 coal->bufs_per_record = 1;
11876 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11879 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11884 rc = bnxt_hwrm_ver_get(bp);
11885 bnxt_try_map_fw_health_reg(bp);
11887 rc = bnxt_try_recover_fw(bp);
11890 rc = bnxt_hwrm_ver_get(bp);
11895 bnxt_nvm_cfg_ver_get(bp);
11897 rc = bnxt_hwrm_func_reset(bp);
11901 bnxt_hwrm_fw_set_time(bp);
11905 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11909 /* Get the MAX capabilities for this function */
11910 rc = bnxt_hwrm_func_qcaps(bp);
11912 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11917 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11919 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11922 if (bnxt_alloc_fw_health(bp)) {
11923 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11925 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11927 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11931 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11935 bnxt_hwrm_func_qcfg(bp);
11936 bnxt_hwrm_vnic_qcaps(bp);
11937 bnxt_hwrm_port_led_qcaps(bp);
11938 bnxt_ethtool_init(bp);
11943 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11945 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11946 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11947 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11948 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11949 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11950 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11951 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11952 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11953 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11957 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11959 struct net_device *dev = bp->dev;
11961 dev->hw_features &= ~NETIF_F_NTUPLE;
11962 dev->features &= ~NETIF_F_NTUPLE;
11963 bp->flags &= ~BNXT_FLAG_RFS;
11964 if (bnxt_rfs_supported(bp)) {
11965 dev->hw_features |= NETIF_F_NTUPLE;
11966 if (bnxt_rfs_capable(bp)) {
11967 bp->flags |= BNXT_FLAG_RFS;
11968 dev->features |= NETIF_F_NTUPLE;
11973 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11975 struct pci_dev *pdev = bp->pdev;
11977 bnxt_set_dflt_rss_hash_type(bp);
11978 bnxt_set_dflt_rfs(bp);
11980 bnxt_get_wol_settings(bp);
11981 if (bp->flags & BNXT_FLAG_WOL_CAP)
11982 device_set_wakeup_enable(&pdev->dev, bp->wol);
11984 device_set_wakeup_capable(&pdev->dev, false);
11986 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11987 bnxt_hwrm_coal_params_qcaps(bp);
11990 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
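/* One-shot firmware init used at probe time and after a firmware reset:
 * phase 1 queries the firmware version and resets the function, phase 2
 * queries capabilities and registers the driver, then the PHY is probed
 * and phase 3 applies the RSS/RFS/WoL defaults.
 */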
11992 static int bnxt_fw_init_one(struct bnxt *bp)
11996 rc = bnxt_fw_init_one_p1(bp);
11998 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12001 rc = bnxt_fw_init_one_p2(bp);
12003 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12006 rc = bnxt_probe_phy(bp, false);
12009 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12013 /* In case fw capabilities have changed, destroy the unneeded
12014 * reporters and create newly capable ones.
12016 bnxt_dl_fw_reporters_destroy(bp, false);
12017 bnxt_dl_fw_reporters_create(bp);
12018 bnxt_fw_init_one_p3(bp);
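/* Execute one step of the firmware-supplied reset register sequence,
 * handling config-space, GRC-window, BAR0 and BAR1 register types and the
 * optional per-step delay.
 */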
12022 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12024 struct bnxt_fw_health *fw_health = bp->fw_health;
12025 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12026 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12027 u32 reg_type, reg_off, delay_msecs;
12029 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12030 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12031 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12032 switch (reg_type) {
12033 case BNXT_FW_HEALTH_REG_TYPE_CFG:
12034 pci_write_config_dword(bp->pdev, reg_off, val);
12036 case BNXT_FW_HEALTH_REG_TYPE_GRC:
12037 writel(reg_off & BNXT_GRC_BASE_MASK,
12038 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12039 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12041 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12042 writel(val, bp->bar0 + reg_off);
12044 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12045 writel(val, bp->bar1 + reg_off);
12049 pci_read_config_dword(bp->pdev, 0, &val);
12050 msleep(delay_msecs);
12054 static void bnxt_reset_all(struct bnxt *bp)
12056 struct bnxt_fw_health *fw_health = bp->fw_health;
12059 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12060 bnxt_fw_reset_via_optee(bp);
12061 bp->fw_reset_timestamp = jiffies;
12065 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12066 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12067 bnxt_fw_reset_writel(bp, i);
12068 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12069 struct hwrm_fw_reset_input *req;
12071 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12073 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12074 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12075 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12076 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12077 rc = hwrm_req_send(bp, req);
12080 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12082 bp->fw_reset_timestamp = jiffies;
12085 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12087 return time_after(jiffies, bp->fw_reset_timestamp +
12088 (bp->fw_reset_max_dsecs * HZ / 10));
12091 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12093 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12094 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12095 bnxt_ulp_start(bp, rc);
12096 bnxt_dl_health_status_update(bp, false);
12098 bp->fw_reset_state = 0;
12099 dev_close(bp->dev);
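/* Firmware reset state machine, run from a delayed work item that re-queues
 * itself between states: POLL_VF (wait for VFs to unregister), optional
 * POLL_FW_DOWN, RESET_FW, ENABLE_DEV (re-enable the PCI device), POLL_FW
 * (wait for HWRM to respond) and finally OPENING (reopen the NIC under rtnl).
 */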
12102 static void bnxt_fw_reset_task(struct work_struct *work)
12104 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12107 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12108 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12112 switch (bp->fw_reset_state) {
12113 case BNXT_FW_RESET_STATE_POLL_VF: {
12114 int n = bnxt_get_registered_vfs(bp);
12118 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12119 n, jiffies_to_msecs(jiffies -
12120 bp->fw_reset_timestamp));
12121 goto fw_reset_abort;
12122 } else if (n > 0) {
12123 if (bnxt_fw_reset_timeout(bp)) {
12124 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12125 bp->fw_reset_state = 0;
12126 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12130 bnxt_queue_fw_reset_work(bp, HZ / 10);
12133 bp->fw_reset_timestamp = jiffies;
12135 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12136 bnxt_fw_reset_abort(bp, rc);
12140 bnxt_fw_reset_close(bp);
12141 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12142 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12145 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12146 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12149 bnxt_queue_fw_reset_work(bp, tmo);
12152 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12155 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12156 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12157 !bnxt_fw_reset_timeout(bp)) {
12158 bnxt_queue_fw_reset_work(bp, HZ / 5);
12162 if (!bp->fw_health->master) {
12163 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12165 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12166 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12169 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12172 case BNXT_FW_RESET_STATE_RESET_FW:
12173 bnxt_reset_all(bp);
12174 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12175 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12177 case BNXT_FW_RESET_STATE_ENABLE_DEV:
12178 bnxt_inv_fw_health_reg(bp);
12179 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12180 !bp->fw_reset_min_dsecs) {
12183 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12184 if (val == 0xffff) {
12185 if (bnxt_fw_reset_timeout(bp)) {
12186 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12188 goto fw_reset_abort;
12190 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12194 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12195 if (pci_enable_device(bp->pdev)) {
12196 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12198 goto fw_reset_abort;
12200 pci_set_master(bp->pdev);
12201 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12203 case BNXT_FW_RESET_STATE_POLL_FW:
12204 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12205 rc = bnxt_hwrm_poll(bp);
12207 if (bnxt_fw_reset_timeout(bp)) {
12208 netdev_err(bp->dev, "Firmware reset aborted\n");
12209 goto fw_reset_abort_status;
12211 bnxt_queue_fw_reset_work(bp, HZ / 5);
12214 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12215 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12217 case BNXT_FW_RESET_STATE_OPENING:
12218 while (!rtnl_trylock()) {
12219 bnxt_queue_fw_reset_work(bp, HZ / 10);
12222 rc = bnxt_open(bp->dev);
12224 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12225 bnxt_fw_reset_abort(bp, rc);
12230 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12231 bp->fw_health->enabled) {
12232 bp->fw_health->last_fw_reset_cnt =
12233 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12235 bp->fw_reset_state = 0;
12236 /* Make sure fw_reset_state is 0 before clearing the flag */
12237 smp_mb__before_atomic();
12238 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12239 bnxt_ulp_start(bp, 0);
12240 bnxt_reenable_sriov(bp);
12241 bnxt_vf_reps_alloc(bp);
12242 bnxt_vf_reps_open(bp);
12243 bnxt_ptp_reapply_pps(bp);
12244 bnxt_dl_health_recovery_done(bp);
12245 bnxt_dl_health_status_update(bp, true);
12251 fw_reset_abort_status:
12252 if (bp->fw_health->status_reliable ||
12253 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12254 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12256 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12260 bnxt_fw_reset_abort(bp, rc);
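/* Basic PCI-level bring-up: enable the device, claim the regions and map
 * BAR0 and BAR4, select a 64-bit (or fall back to 32-bit) DMA mask, and
 * initialize the slow-path work items, timer and default ring/coalescing
 * parameters.
 */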
12264 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12267 struct bnxt *bp = netdev_priv(dev);
12269 SET_NETDEV_DEV(dev, &pdev->dev);
12271 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12272 rc = pci_enable_device(pdev);
12274 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12278 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12279 dev_err(&pdev->dev,
12280 "Cannot find PCI device base address, aborting\n");
12282 goto init_err_disable;
12285 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12287 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12288 goto init_err_disable;
12291 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12292 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12293 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12295 goto init_err_release;
12298 pci_set_master(pdev);
12303 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12304 * determines the BAR size.
12306 bp->bar0 = pci_ioremap_bar(pdev, 0);
12308 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12310 goto init_err_release;
12313 bp->bar2 = pci_ioremap_bar(pdev, 4);
12315 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12317 goto init_err_release;
12320 pci_enable_pcie_error_reporting(pdev);
12322 INIT_WORK(&bp->sp_task, bnxt_sp_task);
12323 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12325 spin_lock_init(&bp->ntp_fltr_lock);
12326 #if BITS_PER_LONG == 32
12327 spin_lock_init(&bp->db_lock);
12330 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12331 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12333 bnxt_init_dflt_coal(bp);
12335 timer_setup(&bp->timer, bnxt_timer, 0);
12336 bp->current_interval = BNXT_TIMER_INTERVAL;
12338 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12339 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12341 clear_bit(BNXT_STATE_OPEN, &bp->state);
12345 bnxt_unmap_bars(bp, pdev);
12346 pci_release_regions(pdev);
12349 pci_disable_device(pdev);
12355 /* rtnl_lock held */
12356 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12358 struct sockaddr *addr = p;
12359 struct bnxt *bp = netdev_priv(dev);
12362 if (!is_valid_ether_addr(addr->sa_data))
12363 return -EADDRNOTAVAIL;
12365 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12368 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12372 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12373 if (netif_running(dev)) {
12374 bnxt_close_nic(bp, false, false);
12375 rc = bnxt_open_nic(bp, false, false);
12381 /* rtnl_lock held */
12382 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12384 struct bnxt *bp = netdev_priv(dev);
12386 if (netif_running(dev))
12387 bnxt_close_nic(bp, true, false);
12389 dev->mtu = new_mtu;
12390 bnxt_set_ring_params(bp);
12392 if (netif_running(dev))
12393 return bnxt_open_nic(bp, true, false);
12398 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12400 struct bnxt *bp = netdev_priv(dev);
12404 if (tc > bp->max_tc) {
12405 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12410 if (netdev_get_num_tc(dev) == tc)
12413 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12416 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12417 sh, tc, bp->tx_nr_rings_xdp);
12421 /* Needs to close the device and do hw resource re-allocations */
12422 if (netif_running(bp->dev))
12423 bnxt_close_nic(bp, true, false);
12426 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12427 netdev_set_num_tc(dev, tc);
12429 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12430 netdev_reset_tc(dev);
12432 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12433 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12434 bp->tx_nr_rings + bp->rx_nr_rings;
12436 if (netif_running(bp->dev))
12437 return bnxt_open_nic(bp, true, false);
12442 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12445 struct bnxt *bp = cb_priv;
12447 if (!bnxt_tc_flower_enabled(bp) ||
12448 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12449 return -EOPNOTSUPP;
12452 case TC_SETUP_CLSFLOWER:
12453 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12455 return -EOPNOTSUPP;
12459 LIST_HEAD(bnxt_block_cb_list);
12461 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12464 struct bnxt *bp = netdev_priv(dev);
12467 case TC_SETUP_BLOCK:
12468 return flow_block_cb_setup_simple(type_data,
12469 &bnxt_block_cb_list,
12470 bnxt_setup_tc_block_cb,
12472 case TC_SETUP_QDISC_MQPRIO: {
12473 struct tc_mqprio_qopt *mqprio = type_data;
12475 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12477 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12480 return -EOPNOTSUPP;
12484 #ifdef CONFIG_RFS_ACCEL
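/* Accelerated RFS support: bnxt_rx_flow_steer() creates an ntuple filter
 * that steers a flow to the requested RX queue, and bnxt_cfg_ntp_filters()
 * later pushes new filters to firmware and expires idle ones.
 */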
12485 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12486 struct bnxt_ntuple_filter *f2)
12488 struct flow_keys *keys1 = &f1->fkeys;
12489 struct flow_keys *keys2 = &f2->fkeys;
12491 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12492 keys1->basic.ip_proto != keys2->basic.ip_proto)
12495 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12496 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12497 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12500 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12501 sizeof(keys1->addrs.v6addrs.src)) ||
12502 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12503 sizeof(keys1->addrs.v6addrs.dst)))
12507 if (keys1->ports.ports == keys2->ports.ports &&
12508 keys1->control.flags == keys2->control.flags &&
12509 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12510 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12516 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12517 u16 rxq_index, u32 flow_id)
12519 struct bnxt *bp = netdev_priv(dev);
12520 struct bnxt_ntuple_filter *fltr, *new_fltr;
12521 struct flow_keys *fkeys;
12522 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12523 int rc = 0, idx, bit_id, l2_idx = 0;
12524 struct hlist_head *head;
12527 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12528 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12531 netif_addr_lock_bh(dev);
12532 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12533 if (ether_addr_equal(eth->h_dest,
12534 vnic->uc_list + off)) {
12539 netif_addr_unlock_bh(dev);
12543 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12547 fkeys = &new_fltr->fkeys;
12548 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12549 rc = -EPROTONOSUPPORT;
12553 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12554 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12555 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12556 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12557 rc = -EPROTONOSUPPORT;
12560 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12561 bp->hwrm_spec_code < 0x10601) {
12562 rc = -EPROTONOSUPPORT;
12565 flags = fkeys->control.flags;
12566 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12567 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12568 rc = -EPROTONOSUPPORT;
12572 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12573 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12575 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12576 head = &bp->ntp_fltr_hash_tbl[idx];
12578 hlist_for_each_entry_rcu(fltr, head, hash) {
12579 if (bnxt_fltr_match(fltr, new_fltr)) {
12587 spin_lock_bh(&bp->ntp_fltr_lock);
12588 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12589 BNXT_NTP_FLTR_MAX_FLTR, 0);
12591 spin_unlock_bh(&bp->ntp_fltr_lock);
12596 new_fltr->sw_id = (u16)bit_id;
12597 new_fltr->flow_id = flow_id;
12598 new_fltr->l2_fltr_idx = l2_idx;
12599 new_fltr->rxq = rxq_index;
12600 hlist_add_head_rcu(&new_fltr->hash, head);
12601 bp->ntp_fltr_count++;
12602 spin_unlock_bh(&bp->ntp_fltr_lock);
12604 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12605 bnxt_queue_sp_work(bp);
12607 return new_fltr->sw_id;
12614 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12618 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12619 struct hlist_head *head;
12620 struct hlist_node *tmp;
12621 struct bnxt_ntuple_filter *fltr;
12624 head = &bp->ntp_fltr_hash_tbl[i];
12625 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12628 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12629 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12632 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12637 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12642 set_bit(BNXT_FLTR_VALID, &fltr->state);
12646 spin_lock_bh(&bp->ntp_fltr_lock);
12647 hlist_del_rcu(&fltr->hash);
12648 bp->ntp_fltr_count--;
12649 spin_unlock_bh(&bp->ntp_fltr_lock);
12651 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12656 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12657 netdev_info(bp->dev, "Receive PF driver unload event!\n");
12662 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12666 #endif /* CONFIG_RFS_ACCEL */
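/* udp_tunnel_nic sync callback: program or free the VXLAN/GENEVE UDP
 * destination port in firmware when the core UDP tunnel table changes.
 */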
12668 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12670 struct bnxt *bp = netdev_priv(netdev);
12671 struct udp_tunnel_info ti;
12674 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12675 if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
12676 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12678 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12681 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12683 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12686 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12687 .sync_table = bnxt_udp_tunnel_sync,
12688 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12689 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12691 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12692 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12696 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12697 struct net_device *dev, u32 filter_mask,
12700 struct bnxt *bp = netdev_priv(dev);
12702 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12703 nlflags, filter_mask, NULL);
12706 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12707 u16 flags, struct netlink_ext_ack *extack)
12709 struct bnxt *bp = netdev_priv(dev);
12710 struct nlattr *attr, *br_spec;
12713 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12714 return -EOPNOTSUPP;
12716 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12720 nla_for_each_nested(attr, br_spec, rem) {
12723 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12726 if (nla_len(attr) < sizeof(mode))
12729 mode = nla_get_u16(attr);
12730 if (mode == bp->br_mode)
12733 rc = bnxt_hwrm_set_br_mode(bp, mode);
12735 bp->br_mode = mode;
12741 int bnxt_get_port_parent_id(struct net_device *dev,
12742 struct netdev_phys_item_id *ppid)
12744 struct bnxt *bp = netdev_priv(dev);
12746 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12747 return -EOPNOTSUPP;
12749 /* The PF and its VF-reps only support the switchdev framework */
12750 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12751 return -EOPNOTSUPP;
12753 ppid->id_len = sizeof(bp->dsn);
12754 memcpy(ppid->id, bp->dsn, ppid->id_len);
12759 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12761 struct bnxt *bp = netdev_priv(dev);
12763 return &bp->dl_port;
12766 static const struct net_device_ops bnxt_netdev_ops = {
12767 .ndo_open = bnxt_open,
12768 .ndo_start_xmit = bnxt_start_xmit,
12769 .ndo_stop = bnxt_close,
12770 .ndo_get_stats64 = bnxt_get_stats64,
12771 .ndo_set_rx_mode = bnxt_set_rx_mode,
12772 .ndo_eth_ioctl = bnxt_ioctl,
12773 .ndo_validate_addr = eth_validate_addr,
12774 .ndo_set_mac_address = bnxt_change_mac_addr,
12775 .ndo_change_mtu = bnxt_change_mtu,
12776 .ndo_fix_features = bnxt_fix_features,
12777 .ndo_set_features = bnxt_set_features,
12778 .ndo_features_check = bnxt_features_check,
12779 .ndo_tx_timeout = bnxt_tx_timeout,
12780 #ifdef CONFIG_BNXT_SRIOV
12781 .ndo_get_vf_config = bnxt_get_vf_config,
12782 .ndo_set_vf_mac = bnxt_set_vf_mac,
12783 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12784 .ndo_set_vf_rate = bnxt_set_vf_bw,
12785 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12786 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
12787 .ndo_set_vf_trust = bnxt_set_vf_trust,
12789 .ndo_setup_tc = bnxt_setup_tc,
12790 #ifdef CONFIG_RFS_ACCEL
12791 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12793 .ndo_bpf = bnxt_xdp,
12794 .ndo_xdp_xmit = bnxt_xdp_xmit,
12795 .ndo_bridge_getlink = bnxt_bridge_getlink,
12796 .ndo_bridge_setlink = bnxt_bridge_setlink,
12797 .ndo_get_devlink_port = bnxt_get_devlink_port,
12800 static void bnxt_remove_one(struct pci_dev *pdev)
12802 struct net_device *dev = pci_get_drvdata(pdev);
12803 struct bnxt *bp = netdev_priv(dev);
12806 bnxt_sriov_disable(bp);
12809 devlink_port_type_clear(&bp->dl_port);
12811 bnxt_ptp_clear(bp);
12812 pci_disable_pcie_error_reporting(pdev);
12813 unregister_netdev(dev);
12814 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12815 /* Flush any pending tasks */
12816 cancel_work_sync(&bp->sp_task);
12817 cancel_delayed_work_sync(&bp->fw_reset_task);
12820 bnxt_dl_fw_reporters_destroy(bp, true);
12821 bnxt_dl_unregister(bp);
12822 bnxt_shutdown_tc(bp);
12824 bnxt_clear_int_mode(bp);
12825 bnxt_hwrm_func_drv_unrgtr(bp);
12826 bnxt_free_hwrm_resources(bp);
12827 bnxt_ethtool_free(bp);
12831 kfree(bp->ptp_cfg);
12832 bp->ptp_cfg = NULL;
12833 kfree(bp->fw_health);
12834 bp->fw_health = NULL;
12835 bnxt_cleanup_pci(bp);
12836 bnxt_free_ctx_mem(bp);
12839 kfree(bp->rss_indir_tbl);
12840 bp->rss_indir_tbl = NULL;
12841 bnxt_free_port_stats(bp);
12845 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12848 struct bnxt_link_info *link_info = &bp->link_info;
12851 rc = bnxt_hwrm_phy_qcaps(bp);
12853 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12857 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12858 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12860 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12864 mutex_lock(&bp->link_lock);
12865 rc = bnxt_update_link(bp, false);
12867 mutex_unlock(&bp->link_lock);
12868 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12873 /* Older firmware does not have supported_auto_speeds, so assume
12874 * that all supported speeds can be autonegotiated.
12876 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12877 link_info->support_auto_speeds = link_info->support_speeds;
12879 bnxt_init_ethtool_link_settings(bp);
12880 mutex_unlock(&bp->link_lock);
12884 static int bnxt_get_max_irq(struct pci_dev *pdev)
12888 if (!pdev->msix_cap)
12891 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12892 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12895 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12898 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12899 int max_ring_grps = 0, max_irq;
12901 *max_tx = hw_resc->max_tx_rings;
12902 *max_rx = hw_resc->max_rx_rings;
12903 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12904 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12905 bnxt_get_ulp_msix_num(bp),
12906 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12907 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12908 *max_cp = min_t(int, *max_cp, max_irq);
12909 max_ring_grps = hw_resc->max_hw_ring_grps;
12910 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12914 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12916 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12917 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12918 /* On P5 chips, max_cp output param should be available NQs */
12921 *max_rx = min_t(int, *max_rx, max_ring_grps);
12924 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12928 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12931 if (!rx || !tx || !cp)
12934 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12937 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12942 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12943 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12944 /* Not enough rings, try disabling agg rings. */
12945 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12946 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12948 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12949 bp->flags |= BNXT_FLAG_AGG_RINGS;
12952 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12953 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12954 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12955 bnxt_set_ring_params(bp);
12958 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12959 int max_cp, max_stat, max_irq;
12961 /* Reserve minimum resources for RoCE */
12962 max_cp = bnxt_get_max_func_cp_rings(bp);
12963 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12964 max_irq = bnxt_get_max_func_irqs(bp);
12965 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12966 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12967 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12970 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12971 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12972 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12973 max_cp = min_t(int, max_cp, max_irq);
12974 max_cp = min_t(int, max_cp, max_stat);
12975 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12982 /* In initial default shared ring setting, each shared ring must have a
 * matching RX/TX ring pair.
 */
12985 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12987 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12988 bp->rx_nr_rings = bp->cp_nr_rings;
12989 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12990 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
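/* Pick default ring counts from the default RSS queue heuristic (1 in
 * kdump), capped by the per-port CPU share and by what firmware can
 * reserve; re-trim and re-reserve if the first reservation comes back
 * smaller than requested.
 */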
12993 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12995 int dflt_rings, max_rx_rings, max_tx_rings, rc;
12997 if (!bnxt_can_reserve_rings(bp))
13001 bp->flags |= BNXT_FLAG_SHARED_RINGS;
13002 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
13003 /* Reduce default rings on multi-port cards so that total default
13004 * rings do not exceed CPU count.
13006 if (bp->port_count > 1) {
13008 max_t(int, num_online_cpus() / bp->port_count, 1);
13010 dflt_rings = min_t(int, dflt_rings, max_rings);
13012 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13015 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13016 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13018 bnxt_trim_dflt_sh_rings(bp);
13020 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13021 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13023 rc = __bnxt_reserve_rings(bp);
13025 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13026 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13028 bnxt_trim_dflt_sh_rings(bp);
13030 /* Rings may have been trimmed, re-reserve the trimmed rings. */
13031 if (bnxt_need_reserve_rings(bp)) {
13032 rc = __bnxt_reserve_rings(bp);
13034 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13035 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13037 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13042 bp->tx_nr_rings = 0;
13043 bp->rx_nr_rings = 0;
13048 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13052 if (bp->tx_nr_rings)
13055 bnxt_ulp_irq_stop(bp);
13056 bnxt_clear_int_mode(bp);
13057 rc = bnxt_set_dflt_rings(bp, true);
13059 netdev_err(bp->dev, "Not enough rings available.\n");
13060 goto init_dflt_ring_err;
13062 rc = bnxt_init_int_mode(bp);
13064 goto init_dflt_ring_err;
13066 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13067 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13068 bp->flags |= BNXT_FLAG_RFS;
13069 bp->dev->features |= NETIF_F_NTUPLE;
13071 init_dflt_ring_err:
13072 bnxt_ulp_irq_restart(bp, rc);
13076 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13081 bnxt_hwrm_func_qcaps(bp);
13083 if (netif_running(bp->dev))
13084 __bnxt_close_nic(bp, true, false);
13086 bnxt_ulp_irq_stop(bp);
13087 bnxt_clear_int_mode(bp);
13088 rc = bnxt_init_int_mode(bp);
13089 bnxt_ulp_irq_restart(bp, rc);
13091 if (netif_running(bp->dev)) {
13093 dev_close(bp->dev);
13095 rc = bnxt_open_nic(bp, true, false);
13101 static int bnxt_init_mac_addr(struct bnxt *bp)
13106 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13108 #ifdef CONFIG_BNXT_SRIOV
13109 struct bnxt_vf_info *vf = &bp->vf;
13110 bool strict_approval = true;
13112 if (is_valid_ether_addr(vf->mac_addr)) {
13113 /* overwrite netdev dev_addr with admin VF MAC */
13114 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
13115 /* Older PF driver or firmware may not approve this
 * MAC address correctly.
 */
13118 strict_approval = false;
13120 eth_hw_addr_random(bp->dev);
13122 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13128 static void bnxt_vpd_read_info(struct bnxt *bp)
13130 struct pci_dev *pdev = bp->pdev;
13131 unsigned int vpd_size, kw_len;
13135 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13136 if (IS_ERR(vpd_data)) {
13137 pci_warn(pdev, "Unable to read VPD\n");
13141 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13142 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13146 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13147 memcpy(bp->board_partno, &vpd_data[pos], size);
13150 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13151 PCI_VPD_RO_KEYWORD_SERIALNO,
13156 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13157 memcpy(bp->board_serialno, &vpd_data[pos], size);
13162 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13164 struct pci_dev *pdev = bp->pdev;
13167 qword = pci_get_dsn(pdev);
13169 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13170 return -EOPNOTSUPP;
13173 put_unaligned_le64(qword, dsn);
13175 bp->flags |= BNXT_FLAG_DSN_VALID;
13179 static int bnxt_map_db_bar(struct bnxt *bp)
13183 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
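/* PCI probe entry point: allocate the netdev, run the firmware init
 * phases, map the doorbell BAR, set up netdev features and default rings,
 * then register the netdev, devlink port and health reporters.
 */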
13189 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13191 struct net_device *dev;
13195 if (pci_is_bridge(pdev))
13198 /* Clear any pending DMA transactions from crash kernel
13199 * while loading driver in capture kernel.
13201 if (is_kdump_kernel()) {
13202 pci_clear_master(pdev);
13206 max_irqs = bnxt_get_max_irq(pdev);
13207 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13211 bp = netdev_priv(dev);
13212 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13213 bnxt_set_max_func_irqs(bp, max_irqs);
13215 if (bnxt_vf_pciid(ent->driver_data))
13216 bp->flags |= BNXT_FLAG_VF;
13218 if (pdev->msix_cap)
13219 bp->flags |= BNXT_FLAG_MSIX_CAP;
13221 rc = bnxt_init_board(pdev, dev);
13223 goto init_err_free;
13225 dev->netdev_ops = &bnxt_netdev_ops;
13226 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13227 dev->ethtool_ops = &bnxt_ethtool_ops;
13228 pci_set_drvdata(pdev, dev);
13230 rc = bnxt_alloc_hwrm_resources(bp);
13232 goto init_err_pci_clean;
13234 mutex_init(&bp->hwrm_cmd_lock);
13235 mutex_init(&bp->link_lock);
13237 rc = bnxt_fw_init_one_p1(bp);
13239 goto init_err_pci_clean;
13242 bnxt_vpd_read_info(bp);
13244 if (BNXT_CHIP_P5(bp)) {
13245 bp->flags |= BNXT_FLAG_CHIP_P5;
13246 if (BNXT_CHIP_SR2(bp))
13247 bp->flags |= BNXT_FLAG_CHIP_SR2;
13250 rc = bnxt_alloc_rss_indir_tbl(bp);
13252 goto init_err_pci_clean;
13254 rc = bnxt_fw_init_one_p2(bp);
13256 goto init_err_pci_clean;
13258 rc = bnxt_map_db_bar(bp);
13260 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13262 goto init_err_pci_clean;
13265 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13266 NETIF_F_TSO | NETIF_F_TSO6 |
13267 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13268 NETIF_F_GSO_IPXIP4 |
13269 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13270 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13271 NETIF_F_RXCSUM | NETIF_F_GRO;
13273 if (BNXT_SUPPORTS_TPA(bp))
13274 dev->hw_features |= NETIF_F_LRO;
13276 dev->hw_enc_features =
13277 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13278 NETIF_F_TSO | NETIF_F_TSO6 |
13279 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13280 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13281 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13282 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13284 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13285 NETIF_F_GSO_GRE_CSUM;
13286 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13287 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13288 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13289 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13290 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13291 if (BNXT_SUPPORTS_TPA(bp))
13292 dev->hw_features |= NETIF_F_GRO_HW;
13293 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13294 if (dev->features & NETIF_F_GRO_HW)
13295 dev->features &= ~NETIF_F_LRO;
13296 dev->priv_flags |= IFF_UNICAST_FLT;
13298 #ifdef CONFIG_BNXT_SRIOV
13299 init_waitqueue_head(&bp->sriov_cfg_wait);
13300 mutex_init(&bp->sriov_lock);
13302 if (BNXT_SUPPORTS_TPA(bp)) {
13303 bp->gro_func = bnxt_gro_func_5730x;
13304 if (BNXT_CHIP_P4(bp))
13305 bp->gro_func = bnxt_gro_func_5731x;
13306 else if (BNXT_CHIP_P5(bp))
13307 bp->gro_func = bnxt_gro_func_5750x;
13309 if (!BNXT_CHIP_P4_PLUS(bp))
13310 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13312 rc = bnxt_init_mac_addr(bp);
13314 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13315 rc = -EADDRNOTAVAIL;
13316 goto init_err_pci_clean;
13320 /* Read the adapter's DSN to use as the eswitch switch_id */
13321 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13324 /* MTU range: 60 - FW defined max */
13325 dev->min_mtu = ETH_ZLEN;
13326 dev->max_mtu = bp->max_mtu;
13328 rc = bnxt_probe_phy(bp, true);
13330 goto init_err_pci_clean;
13332 bnxt_set_rx_skb_mode(bp, false);
13333 bnxt_set_tpa_flags(bp);
13334 bnxt_set_ring_params(bp);
13335 rc = bnxt_set_dflt_rings(bp, true);
13337 netdev_err(bp->dev, "Not enough rings available.\n");
13339 goto init_err_pci_clean;
13342 bnxt_fw_init_one_p3(bp);
13344 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13345 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13347 rc = bnxt_init_int_mode(bp);
13349 goto init_err_pci_clean;
13351 /* No TC has been set yet and rings may have been trimmed due to
13352 * limited MSIX, so we re-initialize the TX rings per TC.
13354 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13359 create_singlethread_workqueue("bnxt_pf_wq");
13361 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13363 goto init_err_pci_clean;
13366 rc = bnxt_init_tc(bp);
13368 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13372 bnxt_inv_fw_health_reg(bp);
13373 bnxt_dl_register(bp);
13375 rc = register_netdev(dev);
13377 goto init_err_cleanup;
13380 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13381 bnxt_dl_fw_reporters_create(bp);
13383 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13384 board_info[ent->driver_data].name,
13385 (long)pci_resource_start(pdev, 0), dev->dev_addr);
13386 pcie_print_link_status(pdev);
13388 pci_save_state(pdev);
13392 bnxt_dl_unregister(bp);
13393 bnxt_shutdown_tc(bp);
13394 bnxt_clear_int_mode(bp);
13396 init_err_pci_clean:
13397 bnxt_hwrm_func_drv_unrgtr(bp);
13398 bnxt_free_hwrm_resources(bp);
13399 bnxt_ethtool_free(bp);
13400 bnxt_ptp_clear(bp);
13401 kfree(bp->ptp_cfg);
13402 bp->ptp_cfg = NULL;
13403 kfree(bp->fw_health);
13404 bp->fw_health = NULL;
13405 bnxt_cleanup_pci(bp);
13406 bnxt_free_ctx_mem(bp);
13409 kfree(bp->rss_indir_tbl);
13410 bp->rss_indir_tbl = NULL;
13417 static void bnxt_shutdown(struct pci_dev *pdev)
13419 struct net_device *dev = pci_get_drvdata(pdev);
13426 bp = netdev_priv(dev);
13428 goto shutdown_exit;
13430 if (netif_running(dev))
13433 bnxt_ulp_shutdown(bp);
13434 bnxt_clear_int_mode(bp);
13435 pci_disable_device(pdev);
13437 if (system_state == SYSTEM_POWER_OFF) {
13438 pci_wake_from_d3(pdev, bp->wol);
13439 pci_set_power_state(pdev, PCI_D3hot);
13446 #ifdef CONFIG_PM_SLEEP
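/* System sleep hooks: suspend closes the NIC, unregisters the driver from
 * firmware and disables the PCI device; resume re-enables the device,
 * re-establishes the firmware handshake and reopens the NIC if it was
 * running.
 */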
13447 static int bnxt_suspend(struct device *device)
13449 struct net_device *dev = dev_get_drvdata(device);
13450 struct bnxt *bp = netdev_priv(dev);
13455 if (netif_running(dev)) {
13456 netif_device_detach(dev);
13457 rc = bnxt_close(dev);
13459 bnxt_hwrm_func_drv_unrgtr(bp);
13460 pci_disable_device(bp->pdev);
13461 bnxt_free_ctx_mem(bp);
13468 static int bnxt_resume(struct device *device)
13470 struct net_device *dev = dev_get_drvdata(device);
13471 struct bnxt *bp = netdev_priv(dev);
13475 rc = pci_enable_device(bp->pdev);
13477 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13481 pci_set_master(bp->pdev);
13482 if (bnxt_hwrm_ver_get(bp)) {
13486 rc = bnxt_hwrm_func_reset(bp);
13492 rc = bnxt_hwrm_func_qcaps(bp);
13496 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13501 bnxt_get_wol_settings(bp);
13502 if (netif_running(dev)) {
13503 rc = bnxt_open(dev);
13505 netif_device_attach(dev);
13509 bnxt_ulp_start(bp, rc);
13511 bnxt_reenable_sriov(bp);
13516 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13517 #define BNXT_PM_OPS (&bnxt_pm_ops)
13521 #define BNXT_PM_OPS NULL
13523 #endif /* CONFIG_PM_SLEEP */
13526 * bnxt_io_error_detected - called when PCI error is detected
13527 * @pdev: Pointer to PCI device
13528 * @state: The current pci connection state
13530 * This function is called after a PCI bus error affecting
13531 * this device has been detected.
13533 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13534 pci_channel_state_t state)
13536 struct net_device *netdev = pci_get_drvdata(pdev);
13537 struct bnxt *bp = netdev_priv(netdev);
13539 netdev_info(netdev, "PCI I/O error detected\n");
13542 netif_device_detach(netdev);
13546 if (state == pci_channel_io_perm_failure) {
13548 return PCI_ERS_RESULT_DISCONNECT;
13551 if (state == pci_channel_io_frozen)
13552 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13554 if (netif_running(netdev))
13555 bnxt_close(netdev);
13557 if (pci_is_enabled(pdev))
13558 pci_disable_device(pdev);
13559 bnxt_free_ctx_mem(bp);
13564 /* Request a slot reset. */
13565 return PCI_ERS_RESULT_NEED_RESET;
13569 * bnxt_io_slot_reset - called after the pci bus has been reset.
13570 * @pdev: Pointer to PCI device
13572 * Restart the card from scratch, as if from a cold-boot.
13573 * At this point, the card has experienced a hard reset,
13574 * followed by fixups by BIOS, and has its config space
13575 * set up identically to what it was at cold boot.
13577 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13579 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13580 struct net_device *netdev = pci_get_drvdata(pdev);
13581 struct bnxt *bp = netdev_priv(netdev);
13584 netdev_info(bp->dev, "PCI Slot Reset\n");
13588 if (pci_enable_device(pdev)) {
13589 dev_err(&pdev->dev,
13590 "Cannot re-enable PCI device after reset.\n");
13592 pci_set_master(pdev);
13593 /* Upon a fatal error, the device's internal logic that latches the
13594 * BAR values is reset and is restored only by rewriting the BARs.
13595 *
13597 * As pci_restore_state() does not rewrite a BAR when its current value
13598 * matches the saved value, the driver needs to write the BARs to 0
13599 * first to force a restore after a fatal error.
13601 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13603 for (off = PCI_BASE_ADDRESS_0;
13604 off <= PCI_BASE_ADDRESS_5; off += 4)
13605 pci_write_config_dword(bp->pdev, off, 0);
13607 pci_restore_state(pdev);
13608 pci_save_state(pdev);
13610 err = bnxt_hwrm_func_reset(bp);
13612 result = PCI_ERS_RESULT_RECOVERED;
13621 * bnxt_io_resume - called when traffic can start flowing again.
13622 * @pdev: Pointer to PCI device
13624 * This callback is called when the error recovery driver tells
13625 * us that it's OK to resume normal operation.
13627 static void bnxt_io_resume(struct pci_dev *pdev)
13629 struct net_device *netdev = pci_get_drvdata(pdev);
13630 struct bnxt *bp = netdev_priv(netdev);
13633 netdev_info(bp->dev, "PCI Slot Resume\n");
13636 err = bnxt_hwrm_func_qcaps(bp);
13637 if (!err && netif_running(netdev))
13638 err = bnxt_open(netdev);
13640 bnxt_ulp_start(bp, err);
13642 bnxt_reenable_sriov(bp);
13643 netif_device_attach(netdev);
13649 static const struct pci_error_handlers bnxt_err_handler = {
13650 .error_detected = bnxt_io_error_detected,
13651 .slot_reset = bnxt_io_slot_reset,
13652 .resume = bnxt_io_resume
13655 static struct pci_driver bnxt_pci_driver = {
13656 .name = DRV_MODULE_NAME,
13657 .id_table = bnxt_pci_tbl,
13658 .probe = bnxt_init_one,
13659 .remove = bnxt_remove_one,
13660 .shutdown = bnxt_shutdown,
13661 .driver.pm = BNXT_PM_OPS,
13662 .err_handler = &bnxt_err_handler,
13663 #if defined(CONFIG_BNXT_SRIOV)
13664 .sriov_configure = bnxt_sriov_configure,
13668 static int __init bnxt_init(void)
13671 return pci_register_driver(&bnxt_pci_driver);
13674 static void __exit bnxt_exit(void)
13676 pci_unregister_driver(&bnxt_pci_driver);
13678 destroy_workqueue(bnxt_pf_wq);
13682 module_init(bnxt_init);
13683 module_exit(bnxt_exit);