/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)
#define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM5745x_NPAR,
        BCM57508,
        BCM57504,
        BCM57502,
        BCM57508_NPAR,
        BCM57504_NPAR,
        BCM57502_NPAR,
        BCM58802,
        BCM58804,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
        NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
        ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
};

static struct workqueue_struct *bnxt_pf_wq;

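/* Return true if @idx identifies a virtual-function board, so callers can
 * skip PF-only setup for VF devices.
 */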
static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

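/* Doorbell helpers.  P5 (57500-series) chips take a 64-bit doorbell built
 * from db_key64, a DBR type and the ring index; older chips take a 32-bit
 * completion-ring doorbell built from the DB_CP_* flags above.
 */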
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
                       db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

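/* TX length-hint table, indexed by packet length in 512-byte units
 * (length >> 9); all packets of 2048 bytes or more share one hint value.
 */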
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

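/* Packets transmitted through a VF representor carry a METADATA_HW_PORT_MUX
 * dst; its port_id supplies the CFA action for the TX BD.  Ordinary traffic
 * returns 0.
 */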
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

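/* Main transmit routine.  Small packets on an otherwise empty ring may be
 * "pushed" inline through the doorbell BAR; everything else is DMA-mapped
 * and described with long TX BDs (one per fragment plus an extension BD).
 */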
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 802.1Q, 802.1ad VLAN offloads;
                 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = 0;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags =
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
                        bnxt_db_write(bp, &txr->tx_db, prod);

                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

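/* TX completion: unmap and free up to @nr_pkts completed packets, update
 * the consumer index and byte-queue limits, and wake the queue if it was
 * stopped and enough descriptors have been freed.
 */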
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}

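/* Allocate an RX buffer page from the ring's page_pool and map it for
 * streaming DMA.  The returned mapping is advanced by rx_dma_offset so it
 * points at the area the chip should write packet data to.
 */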
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = page_pool_dev_alloc_pages(rxr->page_pool);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

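/* Allocate a normal (non page-mode) RX data buffer and map its usable
 * portion for DMA.  Returns NULL on allocation or mapping failure.
 */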
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

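/* Fill one RX descriptor at @prod with a fresh buffer: a page in page
 * mode (used for XDP), otherwise a kmalloc'ed data buffer.
 */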
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

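/* Recycle an RX buffer that could not be passed up the stack: move the
 * buffer and its DMA mapping from the consumer slot back to the current
 * producer slot so the ring stays full.
 */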
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

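/* Allocate and map one aggregation-ring buffer.  When PAGE_SIZE is larger
 * than BNXT_RX_PAGE_SIZE, a single page is carved into multiple agg
 * buffers, with get_page() taken for each extra reference.
 */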
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

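/* Recycle @agg_bufs aggregation buffers after an error, moving each one
 * from its completion-supplied cons slot to the next producer slot.  For
 * P5 TPA, the agg completions come from the per-agg_id array rather than
 * the completion ring.
 */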
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

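/* Build an skb for a page-mode RX buffer: copy the header portion into
 * the skb linear area and keep the page attached as a frag for the
 * payload that follows it.
 */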
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);
        page_pool_release_page(rxr->page_pool, page);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

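/* Build an skb around a normal RX data buffer with build_skb(), after
 * replenishing the ring slot and unmapping the buffer.
 */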
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

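/* Append @agg_bufs aggregation buffers to @skb as page frags.  On
 * allocation failure the skb is dropped and the remaining agg buffers are
 * recycled so no ring entries leak.
 */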
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                                     struct bnxt_cp_ring_info *cpr,
                                     struct sk_buff *skb, u16 idx,
                                     u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

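/* Check that all @agg_bufs aggregation completions up to the advanced
 * consumer index have been written by hardware (the valid bit matches the
 * current pass over the ring).
 */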
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

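/* Copy a small received packet (up to the copy threshold) into a fresh
 * skb so the original RX buffer can stay in the ring.
 */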
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

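/* Advance the raw consumer index past an RX completion that is being
 * discarded, including any aggregation completions that belong to it.
 * Returns -EBUSY if those completions have not all arrived yet.
 */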
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                           u32 *raw_cons, void *cmp)
{
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        return 0;

                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

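/* Slow-path work scheduling.  PFs queue onto the driver's dedicated
 * bnxt_pf_wq workqueue; VFs use the system workqueue.
 */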
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
        if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
                return;

        if (BNXT_PF(bp))
                queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
        else
                schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp)) {
                flush_workqueue(bnxt_pf_wq);
        } else {
                cancel_work_sync(&bp->sp_task);
                cancel_delayed_work_sync(&bp->fw_reset_task);
        }
}

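/* Request a ring reset (a full reset on P5 chips) from the slow path after
 * an RX ring error; rx_next_cons is poisoned to 0xffff so further
 * completions on this ring are ignored until the reset completes.
 */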
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                else
                        set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

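/* Map a hardware TPA agg_id to a free software slot.  P5 hardware may hand
 * out overlapping agg_ids, so a bitmap tracks which slots are live.
 */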
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
        u16 idx = agg_id & MAX_TPA_P5_MASK;

        if (test_bit(idx, map->agg_idx_bmap))
                idx = find_first_zero_bit(map->agg_idx_bmap,
                                          BNXT_AGG_IDX_BMAP_SIZE);
        __set_bit(idx, map->agg_idx_bmap);
        map->agg_id_tbl[agg_id] = idx;
        return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        __clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        return map->agg_id_tbl[agg_id];
}

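/* Handle a TPA_START completion: park the current RX buffer in the
 * per-aggregation tpa_info, record hash/GSO/metadata from the completion,
 * and refill the RX ring slots that the aggregation consumes.
 */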
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct bnxt_tpa_info *tpa_info;
        u16 cons, prod, agg_id;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                agg_id = TPA_START_AGG_ID_P5(tpa_start);
                agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
        } else {
                agg_id = TPA_START_AGG_ID(tpa_start);
        }
        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        if (unlikely(cons != rxr->rx_next_cons ||
                     TPA_START_ERROR(tpa_start))) {
                netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
                            cons, rxr->rx_next_cons,
                            TPA_START_ERROR_CODE(tpa_start1));
                bnxt_sched_reset(bp, rxr);
                return;
        }
        /* Store cfa_code in tpa_info to use in tpa_end
         * completion processing.
         */
        tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
        prod_rx_buf->data = tpa_info->data;
        prod_rx_buf->data_ptr = tpa_info->data_ptr;

        mapping = tpa_info->mapping;
        prod_rx_buf->mapping = mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = cons_rx_buf->mapping;

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                if (netif_msg_rx_err(bp))
                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
        tpa_info->agg_count = 0;

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
        if (agg_bufs)
                bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
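/* Mark a GRO'ed tunnel packet with the right UDP tunnel GSO type based on
 * whether the outer UDP header carries a checksum.
 */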
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
        struct udphdr *uh = NULL;

        if (ip_proto == htons(ETH_P_IP)) {
                struct iphdr *iph = (struct iphdr *)skb->data;

                if (iph->protocol == IPPROTO_UDP)
                        uh = (struct udphdr *)(iph + 1);
        } else {
                struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                if (iph->nexthdr == IPPROTO_UDP)
                        uh = (struct udphdr *)(iph + 1);
        }
        if (uh) {
                if (uh->check)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
                else
                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
        }
}
#endif

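/* GRO fixup for 5731x chips: recover inner/outer header offsets from the
 * TPA hdr_info (compensating for internal loopback packets, which shift
 * all offsets by 4 bytes) and seed the TCP pseudo-header checksum.
 */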
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off;
        u16 outer_ip_off, inner_ip_off, inner_mac_off;
        u32 hdr_info = tpa_info->hdr_info;
        bool loopback = false;

        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

        /* If the packet is an internal loopback packet, the offsets will
         * have an extra 4 bytes.
         */
        if (inner_mac_off == 4) {
                loopback = true;
        } else if (inner_mac_off > 4) {
                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
                                            ETH_HLEN - 2));

                /* We only support inner IPv4/IPv6.  If we don't see the
                 * correct protocol ID, it must be a loopback packet where
                 * the offsets are off by 4.
                 */
                if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
                        loopback = true;
        }
        if (loopback) {
                /* internal loopback packet, subtract all offsets by 4 */
                inner_ip_off -= 4;
                inner_mac_off -= 4;
                outer_ip_off -= 4;
        }

        nw_off = inner_ip_off - ETH_HLEN;
        skb_set_network_header(skb, nw_off);
        if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
                struct ipv6hdr *iph = ipv6_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                struct iphdr *iph = ip_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        }

        if (inner_mac_off) { /* tunnel */
                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
                                            ETH_HLEN - 2));

                bnxt_gro_tunnel(skb, proto);
        }
#endif
        return skb;
}

1393 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1394                                            int payload_off, int tcp_ts,
1395                                            struct sk_buff *skb)
1396 {
1397 #ifdef CONFIG_INET
1398         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1399         u32 hdr_info = tpa_info->hdr_info;
1400         int iphdr_len, nw_off;
1401
1402         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1403         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1404         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1405
1406         nw_off = inner_ip_off - ETH_HLEN;
1407         skb_set_network_header(skb, nw_off);
1408         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1409                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1410         skb_set_transport_header(skb, nw_off + iphdr_len);
1411
1412         if (inner_mac_off) { /* tunnel */
1413                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1414                                             ETH_HLEN - 2));
1415
1416                 bnxt_gro_tunnel(skb, proto);
1417         }
1418 #endif
1419         return skb;
1420 }
1421
1422 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1423 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1424
1425 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1426                                            int payload_off, int tcp_ts,
1427                                            struct sk_buff *skb)
1428 {
1429 #ifdef CONFIG_INET
1430         struct tcphdr *th;
1431         int len, nw_off, tcp_opt_len = 0;
1432
1433         if (tcp_ts)
1434                 tcp_opt_len = 12;
1435
1436         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1437                 struct iphdr *iph;
1438
1439                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1440                          ETH_HLEN;
1441                 skb_set_network_header(skb, nw_off);
1442                 iph = ip_hdr(skb);
1443                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1444                 len = skb->len - skb_transport_offset(skb);
1445                 th = tcp_hdr(skb);
1446                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1447         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1448                 struct ipv6hdr *iph;
1449
1450                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1451                          ETH_HLEN;
1452                 skb_set_network_header(skb, nw_off);
1453                 iph = ipv6_hdr(skb);
1454                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1455                 len = skb->len - skb_transport_offset(skb);
1456                 th = tcp_hdr(skb);
1457                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1458         } else {
1459                 dev_kfree_skb_any(skb);
1460                 return NULL;
1461         }
1462
1463         if (nw_off) /* tunnel */
1464                 bnxt_gro_tunnel(skb, skb->protocol);
1465 #endif
1466         return skb;
1467 }
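
/* Editorial aside: the three chip-specific bnxt_gro_func_*() variants
 * above are reached through the bp->gro_func pointer used in
 * bnxt_gro_skb() below.  A hedged sketch of the probe-time selection;
 * the chip-test macros here are assumptions, not quoted from this file:
 *
 *	bp->gro_func = bnxt_gro_func_5730x;
 *	if (BNXT_CHIP_P4(bp))
 *		bp->gro_func = bnxt_gro_func_5731x;
 *	else if (BNXT_CHIP_P5(bp))
 *		bp->gro_func = bnxt_gro_func_5750x;
 */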
1468
1469 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1470                                            struct bnxt_tpa_info *tpa_info,
1471                                            struct rx_tpa_end_cmp *tpa_end,
1472                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1473                                            struct sk_buff *skb)
1474 {
1475 #ifdef CONFIG_INET
1476         int payload_off;
1477         u16 segs;
1478
1479         segs = TPA_END_TPA_SEGS(tpa_end);
1480         if (segs == 1)
1481                 return skb;
1482
1483         NAPI_GRO_CB(skb)->count = segs;
1484         skb_shinfo(skb)->gso_size =
1485                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1486         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1487         if (bp->flags & BNXT_FLAG_CHIP_P5)
1488                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1489         else
1490                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1491         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1492         if (likely(skb))
1493                 tcp_gro_complete(skb);
1494 #endif
1495         return skb;
1496 }
1497
1498 /* Given the cfa_code of a received packet, determine which
1499  * netdev (vf-rep or PF) the packet is destined for.
1500  */
1501 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1502 {
1503         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1504
1505         /* if vf-rep dev is NULL, the packet must belong to the PF */
1506         return dev ? dev : bp->dev;
1507 }
1508
1509 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1510                                            struct bnxt_cp_ring_info *cpr,
1511                                            u32 *raw_cons,
1512                                            struct rx_tpa_end_cmp *tpa_end,
1513                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1514                                            u8 *event)
1515 {
1516         struct bnxt_napi *bnapi = cpr->bnapi;
1517         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1518         u8 *data_ptr, agg_bufs;
1519         unsigned int len;
1520         struct bnxt_tpa_info *tpa_info;
1521         dma_addr_t mapping;
1522         struct sk_buff *skb;
1523         u16 idx = 0, agg_id;
1524         void *data;
1525         bool gro;
1526
1527         if (unlikely(bnapi->in_reset)) {
1528                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1529
1530                 if (rc < 0)
1531                         return ERR_PTR(-EBUSY);
1532                 return NULL;
1533         }
1534
1535         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1536                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1537                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1538                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1539                 tpa_info = &rxr->rx_tpa[agg_id];
1540                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1541                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1542                                     agg_bufs, tpa_info->agg_count);
1543                         agg_bufs = tpa_info->agg_count;
1544                 }
1545                 tpa_info->agg_count = 0;
1546                 *event |= BNXT_AGG_EVENT;
1547                 bnxt_free_agg_idx(rxr, agg_id);
1548                 idx = agg_id;
1549                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1550         } else {
1551                 agg_id = TPA_END_AGG_ID(tpa_end);
1552                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1553                 tpa_info = &rxr->rx_tpa[agg_id];
1554                 idx = RING_CMP(*raw_cons);
1555                 if (agg_bufs) {
1556                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1557                                 return ERR_PTR(-EBUSY);
1558
1559                         *event |= BNXT_AGG_EVENT;
1560                         idx = NEXT_CMP(idx);
1561                 }
1562                 gro = !!TPA_END_GRO(tpa_end);
1563         }
1564         data = tpa_info->data;
1565         data_ptr = tpa_info->data_ptr;
1566         prefetch(data_ptr);
1567         len = tpa_info->len;
1568         mapping = tpa_info->mapping;
1569
1570         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1571                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1572                 if (agg_bufs > MAX_SKB_FRAGS)
1573                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1574                                     agg_bufs, (int)MAX_SKB_FRAGS);
1575                 return NULL;
1576         }
1577
1578         if (len <= bp->rx_copy_thresh) {
1579                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1580                 if (!skb) {
1581                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1582                         return NULL;
1583                 }
1584         } else {
1585                 u8 *new_data;
1586                 dma_addr_t new_mapping;
1587
1588                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1589                 if (!new_data) {
1590                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1591                         return NULL;
1592                 }
1593
1594                 tpa_info->data = new_data;
1595                 tpa_info->data_ptr = new_data + bp->rx_offset;
1596                 tpa_info->mapping = new_mapping;
1597
1598                 skb = build_skb(data, 0);
1599                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1600                                        bp->rx_buf_use_size, bp->rx_dir,
1601                                        DMA_ATTR_WEAK_ORDERING);
1602
1603                 if (!skb) {
1604                         kfree(data);
1605                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1606                         return NULL;
1607                 }
1608                 skb_reserve(skb, bp->rx_offset);
1609                 skb_put(skb, len);
1610         }
1611
1612         if (agg_bufs) {
1613                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1614                 if (!skb) {
1615                         /* Page reuse already handled by bnxt_rx_pages(). */
1616                         return NULL;
1617                 }
1618         }
1619
1620         skb->protocol =
1621                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1622
1623         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1624                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1625
1626         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1627             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1628                 u16 vlan_proto = tpa_info->metadata >>
1629                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1630                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1631
1632                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1633         }
1634
1635         skb_checksum_none_assert(skb);
1636         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1637                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1638                 skb->csum_level =
1639                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1640         }
1641
1642         if (gro)
1643                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1644
1645         return skb;
1646 }
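
/* Editorial aside: bnxt_tpa_end() uses a two-path buffer strategy.  Small
 * aggregates (len <= bp->rx_copy_thresh) are copied into a fresh skb via
 * bnxt_copy_skb() and the TPA buffer stays in place; larger ones allocate
 * a replacement buffer *before* the old one is unmapped and handed to
 * build_skb(), so an allocation failure never leaves the ring short a
 * buffer.  A condensed sketch of the swap:
 *
 *	new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
 *	if (!new_data)
 *		return NULL;		// old buffer still owned by the ring
 *	tpa_info->data = new_data;	// ring keeps the replacement
 *	skb = build_skb(data, 0);	// old buffer becomes the skb
 */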
1647
1648 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1649                          struct rx_agg_cmp *rx_agg)
1650 {
1651         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1652         struct bnxt_tpa_info *tpa_info;
1653
1654         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1655         tpa_info = &rxr->rx_tpa[agg_id];
1656         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1657         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1658 }
1659
1660 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1661                              struct sk_buff *skb)
1662 {
1663         if (skb->dev != bp->dev) {
1664                 /* this packet belongs to a vf-rep */
1665                 bnxt_vf_rep_rx(bp, skb);
1666                 return;
1667         }
1668         skb_record_rx_queue(skb, bnapi->index);
1669         napi_gro_receive(&bnapi->napi, skb);
1670 }
1671
1672 /* returns the following:
1673  * 1       - 1 packet successfully received
1674  * 0       - successful TPA_START, packet not completed yet
1675  * -EBUSY  - completion ring does not have all the agg buffers yet
1676  * -ENOMEM - packet aborted due to out of memory
1677  * -EIO    - packet aborted due to hw error indicated in BD
1678  */
1679 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1680                        u32 *raw_cons, u8 *event)
1681 {
1682         struct bnxt_napi *bnapi = cpr->bnapi;
1683         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1684         struct net_device *dev = bp->dev;
1685         struct rx_cmp *rxcmp;
1686         struct rx_cmp_ext *rxcmp1;
1687         u32 tmp_raw_cons = *raw_cons;
1688         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1689         struct bnxt_sw_rx_bd *rx_buf;
1690         unsigned int len;
1691         u8 *data_ptr, agg_bufs, cmp_type;
1692         dma_addr_t dma_addr;
1693         struct sk_buff *skb;
1694         void *data;
1695         int rc = 0;
1696         u32 misc;
1697
1698         rxcmp = (struct rx_cmp *)
1699                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1700
1701         cmp_type = RX_CMP_TYPE(rxcmp);
1702
1703         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1704                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1705                 goto next_rx_no_prod_no_len;
1706         }
1707
1708         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1709         cp_cons = RING_CMP(tmp_raw_cons);
1710         rxcmp1 = (struct rx_cmp_ext *)
1711                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1712
1713         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1714                 return -EBUSY;
1715
1716         prod = rxr->rx_prod;
1717
1718         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1719                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1720                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1721
1722                 *event |= BNXT_RX_EVENT;
1723                 goto next_rx_no_prod_no_len;
1724
1725         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1726                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1727                                    (struct rx_tpa_end_cmp *)rxcmp,
1728                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1729
1730                 if (IS_ERR(skb))
1731                         return -EBUSY;
1732
1733                 rc = -ENOMEM;
1734                 if (likely(skb)) {
1735                         bnxt_deliver_skb(bp, bnapi, skb);
1736                         rc = 1;
1737                 }
1738                 *event |= BNXT_RX_EVENT;
1739                 goto next_rx_no_prod_no_len;
1740         }
1741
1742         cons = rxcmp->rx_cmp_opaque;
1743         if (unlikely(cons != rxr->rx_next_cons)) {
1744                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1745
1746                 /* 0xffff is a forced error, don't print it */
1747                 if (rxr->rx_next_cons != 0xffff)
1748                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1749                                     cons, rxr->rx_next_cons);
1750                 bnxt_sched_reset(bp, rxr);
1751                 return rc1;
1752         }
1753         rx_buf = &rxr->rx_buf_ring[cons];
1754         data = rx_buf->data;
1755         data_ptr = rx_buf->data_ptr;
1756         prefetch(data_ptr);
1757
1758         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1759         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1760
1761         if (agg_bufs) {
1762                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1763                         return -EBUSY;
1764
1765                 cp_cons = NEXT_CMP(cp_cons);
1766                 *event |= BNXT_AGG_EVENT;
1767         }
1768         *event |= BNXT_RX_EVENT;
1769
1770         rx_buf->data = NULL;
1771         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1772                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1773
1774                 bnxt_reuse_rx_data(rxr, cons, data);
1775                 if (agg_bufs)
1776                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1777                                                false);
1778
1779                 rc = -EIO;
1780                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1781                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1782                         if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1783                             !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1784                                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1785                                                  rx_err);
1786                                 bnxt_sched_reset(bp, rxr);
1787                         }
1788                 }
1789                 goto next_rx_no_len;
1790         }
1791
1792         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1793         dma_addr = rx_buf->mapping;
1794
1795         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1796                 rc = 1;
1797                 goto next_rx;
1798         }
1799
1800         if (len <= bp->rx_copy_thresh) {
1801                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1802                 bnxt_reuse_rx_data(rxr, cons, data);
1803                 if (!skb) {
1804                         if (agg_bufs)
1805                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1806                                                        agg_bufs, false);
1807                         rc = -ENOMEM;
1808                         goto next_rx;
1809                 }
1810         } else {
1811                 u32 payload;
1812
1813                 if (rx_buf->data_ptr == data_ptr)
1814                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1815                 else
1816                         payload = 0;
1817                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1818                                       payload | len);
1819                 if (!skb) {
1820                         rc = -ENOMEM;
1821                         goto next_rx;
1822                 }
1823         }
1824
1825         if (agg_bufs) {
1826                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1827                 if (!skb) {
1828                         rc = -ENOMEM;
1829                         goto next_rx;
1830                 }
1831         }
1832
1833         if (RX_CMP_HASH_VALID(rxcmp)) {
1834                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1835                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1836
1837                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1838                 if (hash_type != 1 && hash_type != 3)
1839                         type = PKT_HASH_TYPE_L3;
1840                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1841         }
1842
1843         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1844         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1845
1846         if ((rxcmp1->rx_cmp_flags2 &
1847              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1848             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1849                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1850                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1851                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1852
1853                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1854         }
1855
1856         skb_checksum_none_assert(skb);
1857         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1858                 if (dev->features & NETIF_F_RXCSUM) {
1859                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1860                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1861                 }
1862         } else {
1863                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1864                         if (dev->features & NETIF_F_RXCSUM)
1865                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1866                 }
1867         }
1868
1869         bnxt_deliver_skb(bp, bnapi, skb);
1870         rc = 1;
1871
1872 next_rx:
1873         cpr->rx_packets += 1;
1874         cpr->rx_bytes += len;
1875
1876 next_rx_no_len:
1877         rxr->rx_prod = NEXT_RX(prod);
1878         rxr->rx_next_cons = NEXT_RX(cons);
1879
1880 next_rx_no_prod_no_len:
1881         *raw_cons = tmp_raw_cons;
1882
1883         return rc;
1884 }
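
/* Editorial aside: a minimal, hypothetical consumer of bnxt_rx_pkt()'s
 * return codes (the driver's real loop is __bnxt_poll_work() below):
 *
 *	rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
 *	if (rc == 1)
 *		rx_pkts++;	// one packet delivered
 *	else if (rc == -ENOMEM)
 *		rx_pkts++;	// count it so the NAPI budget still drains
 *	else if (rc == -EBUSY)
 *		break;		// partial completion, poll again later
 *	// rc == 0 (TPA start) and rc == -EIO just advance to the next entry
 */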
1885
1886 /* In netpoll mode, if we are using a combined completion ring, we need to
1887  * discard the rx packets and recycle the buffers.
1888  */
1889 static int bnxt_force_rx_discard(struct bnxt *bp,
1890                                  struct bnxt_cp_ring_info *cpr,
1891                                  u32 *raw_cons, u8 *event)
1892 {
1893         u32 tmp_raw_cons = *raw_cons;
1894         struct rx_cmp_ext *rxcmp1;
1895         struct rx_cmp *rxcmp;
1896         u16 cp_cons;
1897         u8 cmp_type;
1898
1899         cp_cons = RING_CMP(tmp_raw_cons);
1900         rxcmp = (struct rx_cmp *)
1901                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1902
1903         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1904         cp_cons = RING_CMP(tmp_raw_cons);
1905         rxcmp1 = (struct rx_cmp_ext *)
1906                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1907
1908         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1909                 return -EBUSY;
1910
1911         cmp_type = RX_CMP_TYPE(rxcmp);
1912         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1913                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1914                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1915         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1916                 struct rx_tpa_end_cmp_ext *tpa_end1;
1917
1918                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1919                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1920                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1921         }
1922         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1923 }
1924
1925 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1926 {
1927         struct bnxt_fw_health *fw_health = bp->fw_health;
1928         u32 reg = fw_health->regs[reg_idx];
1929         u32 reg_type, reg_off, val = 0;
1930
1931         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1932         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1933         switch (reg_type) {
1934         case BNXT_FW_HEALTH_REG_TYPE_CFG:
1935                 pci_read_config_dword(bp->pdev, reg_off, &val);
1936                 break;
1937         case BNXT_FW_HEALTH_REG_TYPE_GRC:
1938                 reg_off = fw_health->mapped_regs[reg_idx];
1939                 fallthrough;
1940         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1941                 val = readl(bp->bar0 + reg_off);
1942                 break;
1943         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1944                 val = readl(bp->bar1 + reg_off);
1945                 break;
1946         }
1947         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1948                 val &= fw_health->fw_reset_inprog_reg_mask;
1949         return val;
1950 }
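
/* Editorial aside: an illustrative (not authoritative) use of
 * bnxt_fw_health_readl() for heartbeat tracking, using the same fields
 * that bnxt_async_event_process() initializes below:
 *
 *	u32 hb = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
 *
 *	if (hb == fw_health->last_fw_heartbeat)
 *		handle_stalled_fw(bp);	// hypothetical helper
 *	fw_health->last_fw_heartbeat = hb;
 */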
1951
1952 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
1953 {
1954         int i;
1955
1956         for (i = 0; i < bp->rx_nr_rings; i++) {
1957                 u16 grp_idx = bp->rx_ring[i].bnapi->index;
1958                 struct bnxt_ring_grp_info *grp_info;
1959
1960                 grp_info = &bp->grp_info[grp_idx];
1961                 if (grp_info->agg_fw_ring_id == ring_id)
1962                         return grp_idx;
1963         }
1964         return INVALID_HW_RING_ID;
1965 }
1966
1967 #define BNXT_GET_EVENT_PORT(data)       \
1968         ((data) &                       \
1969          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1970
1971 #define BNXT_EVENT_RING_TYPE(data2)     \
1972         ((data2) &                      \
1973          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
1974
1975 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
1976         (BNXT_EVENT_RING_TYPE(data2) == \
1977          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
1978
1979 static int bnxt_async_event_process(struct bnxt *bp,
1980                                     struct hwrm_async_event_cmpl *cmpl)
1981 {
1982         u16 event_id = le16_to_cpu(cmpl->event_id);
1983         u32 data1 = le32_to_cpu(cmpl->event_data1);
1984         u32 data2 = le32_to_cpu(cmpl->event_data2);
1985
1986         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1987         switch (event_id) {
1988         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1989                 struct bnxt_link_info *link_info = &bp->link_info;
1990
1991                 if (BNXT_VF(bp))
1992                         goto async_event_process_exit;
1993
1994                 /* print unsupported speed warning in forced speed mode only */
1995                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1996                     (data1 & 0x20000)) {
1997                         u16 fw_speed = link_info->force_link_speed;
1998                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1999
2000                         if (speed != SPEED_UNKNOWN)
2001                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2002                                             speed);
2003                 }
2004                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2005         }
2006                 fallthrough;
2007         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2008         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2009                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2010                 fallthrough;
2011         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2012                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2013                 break;
2014         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2015                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2016                 break;
2017         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2018                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2019
2020                 if (BNXT_VF(bp))
2021                         break;
2022
2023                 if (bp->pf.port_id != port_id)
2024                         break;
2025
2026                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2027                 break;
2028         }
2029         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2030                 if (BNXT_PF(bp))
2031                         goto async_event_process_exit;
2032                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2033                 break;
2034         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
2035                 if (netif_msg_hw(bp))
2036                         netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
2037                                     data1, data2);
2038                 if (!bp->fw_health)
2039                         goto async_event_process_exit;
2040
2041                 bp->fw_reset_timestamp = jiffies;
2042                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2043                 if (!bp->fw_reset_min_dsecs)
2044                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2045                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2046                 if (!bp->fw_reset_max_dsecs)
2047                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2048                 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2049                         netdev_warn(bp->dev, "Firmware fatal reset event received\n");
2050                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2051                 } else {
2052                         netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
2053                                     bp->fw_reset_max_dsecs * 100);
2054                 }
2055                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2056                 break;
2057         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2058                 struct bnxt_fw_health *fw_health = bp->fw_health;
2059
2060                 if (!fw_health)
2061                         goto async_event_process_exit;
2062
2063                 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2064                 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2065                 if (!fw_health->enabled)
2066                         break;
2067
2068                 if (netif_msg_drv(bp))
2069                         netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2070                                     fw_health->enabled, fw_health->master,
2071                                     bnxt_fw_health_readl(bp,
2072                                                          BNXT_FW_RESET_CNT_REG),
2073                                     bnxt_fw_health_readl(bp,
2074                                                          BNXT_FW_HEALTH_REG));
2075                 fw_health->tmr_multiplier =
2076                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2077                                      bp->current_interval * 10);
2078                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2079                 fw_health->last_fw_heartbeat =
2080                         bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2081                 fw_health->last_fw_reset_cnt =
2082                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2083                 goto async_event_process_exit;
2084         }
2085         case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2086                 struct bnxt_rx_ring_info *rxr;
2087                 u16 grp_idx;
2088
2089                 if (bp->flags & BNXT_FLAG_CHIP_P5)
2090                         goto async_event_process_exit;
2091
2092                 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2093                             BNXT_EVENT_RING_TYPE(data2), data1);
2094                 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2095                         goto async_event_process_exit;
2096
2097                 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2098                 if (grp_idx == INVALID_HW_RING_ID) {
2099                         netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2100                                     data1);
2101                         goto async_event_process_exit;
2102                 }
2103                 rxr = bp->bnapi[grp_idx]->rx_ring;
2104                 bnxt_sched_reset(bp, rxr);
2105                 goto async_event_process_exit;
2106         }
2107         default:
2108                 goto async_event_process_exit;
2109         }
2110         bnxt_queue_sp_work(bp);
2111 async_event_process_exit:
2112         bnxt_ulp_async_events(bp, cmpl);
2113         return 0;
2114 }
2115
2116 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2117 {
2118         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2119         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2120         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2121                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2122
2123         switch (cmpl_type) {
2124         case CMPL_BASE_TYPE_HWRM_DONE:
2125                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2126                 if (seq_id == bp->hwrm_intr_seq_id)
2127                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2128                 else
2129                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2130                 break;
2131
2132         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2133                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2134
2135                 if ((vf_id < bp->pf.first_vf_id) ||
2136                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2137                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2138                                    vf_id);
2139                         return -EINVAL;
2140                 }
2141
2142                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2143                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2144                 bnxt_queue_sp_work(bp);
2145                 break;
2146
2147         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2148                 bnxt_async_event_process(bp,
2149                                          (struct hwrm_async_event_cmpl *)txcmp);
2150                 break;
2151         default:
2152                 break;
2153         }
2154
2155         return 0;
2156 }
2157
2158 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2159 {
2160         struct bnxt_napi *bnapi = dev_instance;
2161         struct bnxt *bp = bnapi->bp;
2162         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2163         u32 cons = RING_CMP(cpr->cp_raw_cons);
2164
2165         cpr->event_ctr++;
2166         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2167         napi_schedule(&bnapi->napi);
2168         return IRQ_HANDLED;
2169 }
2170
2171 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2172 {
2173         u32 raw_cons = cpr->cp_raw_cons;
2174         u16 cons = RING_CMP(raw_cons);
2175         struct tx_cmp *txcmp;
2176
2177         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2178
2179         return TX_CMP_VALID(txcmp, raw_cons);
2180 }
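
/* Editorial aside: TX_CMP_VALID() implements the usual phase-bit test for
 * completion rings: raw_cons counts past the ring size, and the entry's
 * valid bit must match the phase implied by the current wrap.  A hedged
 * sketch of the idea (the field and bit names are assumptions; the real
 * macro lives in bnxt.h):
 *
 *	valid = !!(txcmp->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V));
 *	phase = !(raw_cons & bp->cp_bit);	// flips on each wrap
 *	entry_is_new = (valid == phase);
 */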
2181
2182 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2183 {
2184         struct bnxt_napi *bnapi = dev_instance;
2185         struct bnxt *bp = bnapi->bp;
2186         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2187         u32 cons = RING_CMP(cpr->cp_raw_cons);
2188         u32 int_status;
2189
2190         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2191
2192         if (!bnxt_has_work(bp, cpr)) {
2193                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2194                 /* return if erroneous interrupt */
2195                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2196                         return IRQ_NONE;
2197         }
2198
2199         /* disable ring IRQ */
2200         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2201
2202         /* Return here if interrupt is shared and is disabled. */
2203         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2204                 return IRQ_HANDLED;
2205
2206         napi_schedule(&bnapi->napi);
2207         return IRQ_HANDLED;
2208 }
2209
2210 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2211                             int budget)
2212 {
2213         struct bnxt_napi *bnapi = cpr->bnapi;
2214         u32 raw_cons = cpr->cp_raw_cons;
2215         u32 cons;
2216         int tx_pkts = 0;
2217         int rx_pkts = 0;
2218         u8 event = 0;
2219         struct tx_cmp *txcmp;
2220
2221         cpr->has_more_work = 0;
2222         cpr->had_work_done = 1;
2223         while (1) {
2224                 int rc;
2225
2226                 cons = RING_CMP(raw_cons);
2227                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2228
2229                 if (!TX_CMP_VALID(txcmp, raw_cons))
2230                         break;
2231
2232                 /* The valid test of the entry must be done before
2233                  * reading any further data.
2234                  */
2235                 dma_rmb();
2236                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2237                         tx_pkts++;
2238                         /* return full budget so NAPI will complete. */
2239                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2240                                 rx_pkts = budget;
2241                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2242                                 if (budget)
2243                                         cpr->has_more_work = 1;
2244                                 break;
2245                         }
2246                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2247                         if (likely(budget))
2248                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2249                         else
2250                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2251                                                            &event);
2252                         if (likely(rc >= 0))
2253                                 rx_pkts += rc;
2254                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2255                          * the NAPI budget.  Otherwise, we may potentially loop
2256                          * here forever if we consistently cannot allocate
2257                          * buffers.
2258                          */
2259                         else if (rc == -ENOMEM && budget)
2260                                 rx_pkts++;
2261                         else if (rc == -EBUSY)  /* partial completion */
2262                                 break;
2263                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2264                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2265                                     (TX_CMP_TYPE(txcmp) ==
2266                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2267                                     (TX_CMP_TYPE(txcmp) ==
2268                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2269                         bnxt_hwrm_handler(bp, txcmp);
2270                 }
2271                 raw_cons = NEXT_RAW_CMP(raw_cons);
2272
2273                 if (rx_pkts && rx_pkts == budget) {
2274                         cpr->has_more_work = 1;
2275                         break;
2276                 }
2277         }
2278
2279         if (event & BNXT_REDIRECT_EVENT)
2280                 xdp_do_flush_map();
2281
2282         if (event & BNXT_TX_EVENT) {
2283                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2284                 u16 prod = txr->tx_prod;
2285
2286                 /* Sync BD data before updating doorbell */
2287                 wmb();
2288
2289                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2290         }
2291
2292         cpr->cp_raw_cons = raw_cons;
2293         bnapi->tx_pkts += tx_pkts;
2294         bnapi->events |= event;
2295         return rx_pkts;
2296 }
2297
2298 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2299 {
2300         if (bnapi->tx_pkts) {
2301                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2302                 bnapi->tx_pkts = 0;
2303         }
2304
2305         if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2306                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2307
2308                 if (bnapi->events & BNXT_AGG_EVENT)
2309                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2310                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2311         }
2312         bnapi->events = 0;
2313 }
2314
2315 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2316                           int budget)
2317 {
2318         struct bnxt_napi *bnapi = cpr->bnapi;
2319         int rx_pkts;
2320
2321         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2322
2323         /* ACK completion ring before freeing tx ring and producing new
2324          * buffers in rx/agg rings to prevent overflowing the completion
2325          * ring.
2326          */
2327         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2328
2329         __bnxt_poll_work_done(bp, bnapi);
2330         return rx_pkts;
2331 }
2332
2333 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2334 {
2335         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2336         struct bnxt *bp = bnapi->bp;
2337         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2338         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2339         struct tx_cmp *txcmp;
2340         struct rx_cmp_ext *rxcmp1;
2341         u32 cp_cons, tmp_raw_cons;
2342         u32 raw_cons = cpr->cp_raw_cons;
2343         u32 rx_pkts = 0;
2344         u8 event = 0;
2345
2346         while (1) {
2347                 int rc;
2348
2349                 cp_cons = RING_CMP(raw_cons);
2350                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2351
2352                 if (!TX_CMP_VALID(txcmp, raw_cons))
2353                         break;
2354
2355                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2356                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2357                         cp_cons = RING_CMP(tmp_raw_cons);
2358                         rxcmp1 = (struct rx_cmp_ext *)
2359                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2360
2361                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2362                                 break;
2363
2364                         /* force an error to recycle the buffer */
2365                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2366                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2367
2368                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2369                         if (likely(rc == -EIO) && budget)
2370                                 rx_pkts++;
2371                         else if (rc == -EBUSY)  /* partial completion */
2372                                 break;
2373                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2374                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2375                         bnxt_hwrm_handler(bp, txcmp);
2376                 } else {
2377                         netdev_err(bp->dev,
2378                                    "Invalid completion received on special ring\n");
2379                 }
2380                 raw_cons = NEXT_RAW_CMP(raw_cons);
2381
2382                 if (rx_pkts == budget)
2383                         break;
2384         }
2385
2386         cpr->cp_raw_cons = raw_cons;
2387         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2388         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2389
2390         if (event & BNXT_AGG_EVENT)
2391                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2392
2393         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2394                 napi_complete_done(napi, rx_pkts);
2395                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2396         }
2397         return rx_pkts;
2398 }
2399
2400 static int bnxt_poll(struct napi_struct *napi, int budget)
2401 {
2402         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2403         struct bnxt *bp = bnapi->bp;
2404         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2405         int work_done = 0;
2406
2407         while (1) {
2408                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2409
2410                 if (work_done >= budget) {
2411                         if (!budget)
2412                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2413                         break;
2414                 }
2415
2416                 if (!bnxt_has_work(bp, cpr)) {
2417                         if (napi_complete_done(napi, work_done))
2418                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2419                         break;
2420                 }
2421         }
2422         if (bp->flags & BNXT_FLAG_DIM) {
2423                 struct dim_sample dim_sample = {};
2424
2425                 dim_update_sample(cpr->event_ctr,
2426                                   cpr->rx_packets,
2427                                   cpr->rx_bytes,
2428                                   &dim_sample);
2429                 net_dim(&cpr->dim, dim_sample);
2430         }
2431         return work_done;
2432 }
2433
2434 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2435 {
2436         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2437         int i, work_done = 0;
2438
2439         for (i = 0; i < 2; i++) {
2440                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2441
2442                 if (cpr2) {
2443                         work_done += __bnxt_poll_work(bp, cpr2,
2444                                                       budget - work_done);
2445                         cpr->has_more_work |= cpr2->has_more_work;
2446                 }
2447         }
2448         return work_done;
2449 }
2450
2451 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2452                                  u64 dbr_type)
2453 {
2454         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2455         int i;
2456
2457         for (i = 0; i < 2; i++) {
2458                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2459                 struct bnxt_db_info *db;
2460
2461                 if (cpr2 && cpr2->had_work_done) {
2462                         db = &cpr2->cp_db;
2463                         writeq(db->db_key64 | dbr_type |
2464                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2465                         cpr2->had_work_done = 0;
2466                 }
2467         }
2468         __bnxt_poll_work_done(bp, bnapi);
2469 }
2470
2471 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2472 {
2473         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2474         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2475         u32 raw_cons = cpr->cp_raw_cons;
2476         struct bnxt *bp = bnapi->bp;
2477         struct nqe_cn *nqcmp;
2478         int work_done = 0;
2479         u32 cons;
2480
2481         if (cpr->has_more_work) {
2482                 cpr->has_more_work = 0;
2483                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2484         }
2485         while (1) {
2486                 cons = RING_CMP(raw_cons);
2487                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2488
2489                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2490                         if (cpr->has_more_work)
2491                                 break;
2492
2493                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2494                         cpr->cp_raw_cons = raw_cons;
2495                         if (napi_complete_done(napi, work_done))
2496                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2497                                                   cpr->cp_raw_cons);
2498                         return work_done;
2499                 }
2500
2501                 /* The valid test of the entry must be done before
2502                  * reading any further data.
2503                  */
2504                 dma_rmb();
2505
2506                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2507                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2508                         struct bnxt_cp_ring_info *cpr2;
2509
2510                         cpr2 = cpr->cp_ring_arr[idx];
2511                         work_done += __bnxt_poll_work(bp, cpr2,
2512                                                       budget - work_done);
2513                         cpr->has_more_work |= cpr2->has_more_work;
2514                 } else {
2515                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2516                 }
2517                 raw_cons = NEXT_RAW_CMP(raw_cons);
2518         }
2519         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2520         if (raw_cons != cpr->cp_raw_cons) {
2521                 cpr->cp_raw_cons = raw_cons;
2522                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2523         }
2524         return work_done;
2525 }
2526
2527 static void bnxt_free_tx_skbs(struct bnxt *bp)
2528 {
2529         int i, max_idx;
2530         struct pci_dev *pdev = bp->pdev;
2531
2532         if (!bp->tx_ring)
2533                 return;
2534
2535         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2536         for (i = 0; i < bp->tx_nr_rings; i++) {
2537                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2538                 int j;
2539
2540                 for (j = 0; j < max_idx;) {
2541                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2542                         struct sk_buff *skb;
2543                         int k, last;
2544
2545                         if (i < bp->tx_nr_rings_xdp &&
2546                             tx_buf->action == XDP_REDIRECT) {
2547                                 dma_unmap_single(&pdev->dev,
2548                                         dma_unmap_addr(tx_buf, mapping),
2549                                         dma_unmap_len(tx_buf, len),
2550                                         PCI_DMA_TODEVICE);
2551                                 xdp_return_frame(tx_buf->xdpf);
2552                                 tx_buf->action = 0;
2553                                 tx_buf->xdpf = NULL;
2554                                 j++;
2555                                 continue;
2556                         }
2557
2558                         skb = tx_buf->skb;
2559                         if (!skb) {
2560                                 j++;
2561                                 continue;
2562                         }
2563
2564                         tx_buf->skb = NULL;
2565
2566                         if (tx_buf->is_push) {
2567                                 dev_kfree_skb(skb);
2568                                 j += 2;
2569                                 continue;
2570                         }
2571
2572                         dma_unmap_single(&pdev->dev,
2573                                          dma_unmap_addr(tx_buf, mapping),
2574                                          skb_headlen(skb),
2575                                          PCI_DMA_TODEVICE);
2576
2577                         last = tx_buf->nr_frags;
2578                         j += 2;
2579                         for (k = 0; k < last; k++, j++) {
2580                                 int ring_idx = j & bp->tx_ring_mask;
2581                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2582
2583                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2584                                 dma_unmap_page(
2585                                         &pdev->dev,
2586                                         dma_unmap_addr(tx_buf, mapping),
2587                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2588                         }
2589                         dev_kfree_skb(skb);
2590                 }
2591                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2592         }
2593 }
2594
2595 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2596 {
2597         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2598         struct pci_dev *pdev = bp->pdev;
2599         struct bnxt_tpa_idx_map *map;
2600         int i, max_idx, max_agg_idx;
2601
2602         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2603         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2604         if (!rxr->rx_tpa)
2605                 goto skip_rx_tpa_free;
2606
2607         for (i = 0; i < bp->max_tpa; i++) {
2608                 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2609                 u8 *data = tpa_info->data;
2610
2611                 if (!data)
2612                         continue;
2613
2614                 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2615                                        bp->rx_buf_use_size, bp->rx_dir,
2616                                        DMA_ATTR_WEAK_ORDERING);
2617
2618                 tpa_info->data = NULL;
2619
2620                 kfree(data);
2621         }
2622
2623 skip_rx_tpa_free:
2624         for (i = 0; i < max_idx; i++) {
2625                 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2626                 dma_addr_t mapping = rx_buf->mapping;
2627                 void *data = rx_buf->data;
2628
2629                 if (!data)
2630                         continue;
2631
2632                 rx_buf->data = NULL;
2633                 if (BNXT_RX_PAGE_MODE(bp)) {
2634                         mapping -= bp->rx_dma_offset;
2635                         dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2636                                              bp->rx_dir,
2637                                              DMA_ATTR_WEAK_ORDERING);
2638                         page_pool_recycle_direct(rxr->page_pool, data);
2639                 } else {
2640                         dma_unmap_single_attrs(&pdev->dev, mapping,
2641                                                bp->rx_buf_use_size, bp->rx_dir,
2642                                                DMA_ATTR_WEAK_ORDERING);
2643                         kfree(data);
2644                 }
2645         }
2646         for (i = 0; i < max_agg_idx; i++) {
2647                 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2648                 struct page *page = rx_agg_buf->page;
2649
2650                 if (!page)
2651                         continue;
2652
2653                 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2654                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2655                                      DMA_ATTR_WEAK_ORDERING);
2656
2657                 rx_agg_buf->page = NULL;
2658                 __clear_bit(i, rxr->rx_agg_bmap);
2659
2660                 __free_page(page);
2661         }
2662         if (rxr->rx_page) {
2663                 __free_page(rxr->rx_page);
2664                 rxr->rx_page = NULL;
2665         }
2666         map = rxr->rx_tpa_idx_map;
2667         if (map)
2668                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2669 }
2670
2671 static void bnxt_free_rx_skbs(struct bnxt *bp)
2672 {
2673         int i;
2674
2675         if (!bp->rx_ring)
2676                 return;
2677
2678         for (i = 0; i < bp->rx_nr_rings; i++)
2679                 bnxt_free_one_rx_ring_skbs(bp, i);
2680 }
2681
2682 static void bnxt_free_skbs(struct bnxt *bp)
2683 {
2684         bnxt_free_tx_skbs(bp);
2685         bnxt_free_rx_skbs(bp);
2686 }
2687
2688 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2689 {
2690         struct pci_dev *pdev = bp->pdev;
2691         int i;
2692
2693         for (i = 0; i < rmem->nr_pages; i++) {
2694                 if (!rmem->pg_arr[i])
2695                         continue;
2696
2697                 dma_free_coherent(&pdev->dev, rmem->page_size,
2698                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2699
2700                 rmem->pg_arr[i] = NULL;
2701         }
2702         if (rmem->pg_tbl) {
2703                 size_t pg_tbl_size = rmem->nr_pages * 8;
2704
2705                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2706                         pg_tbl_size = rmem->page_size;
2707                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2708                                   rmem->pg_tbl, rmem->pg_tbl_map);
2709                 rmem->pg_tbl = NULL;
2710         }
2711         if (rmem->vmem_size && *rmem->vmem) {
2712                 vfree(*rmem->vmem);
2713                 *rmem->vmem = NULL;
2714         }
2715 }
2716
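/* Allocate the descriptor pages for a ring.  When the ring spans more
 * than one page (or is multi-level), a page table of 64-bit
 * little-endian entries (8 bytes each, hence the "nr_pages * 8" above
 * and below) is allocated first; each entry holds a page's DMA address
 * OR'ed with PTE flag bits such as PTU_PTE_VALID, with
 * PTU_PTE_NEXT_TO_LAST/PTU_PTE_LAST marking the final two pages of a
 * ring.
 */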
2717 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2718 {
2719         struct pci_dev *pdev = bp->pdev;
2720         u64 valid_bit = 0;
2721         int i;
2722
2723         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2724                 valid_bit = PTU_PTE_VALID;
2725         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2726                 size_t pg_tbl_size = rmem->nr_pages * 8;
2727
2728                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2729                         pg_tbl_size = rmem->page_size;
2730                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2731                                                   &rmem->pg_tbl_map,
2732                                                   GFP_KERNEL);
2733                 if (!rmem->pg_tbl)
2734                         return -ENOMEM;
2735         }
2736
2737         for (i = 0; i < rmem->nr_pages; i++) {
2738                 u64 extra_bits = valid_bit;
2739
2740                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2741                                                      rmem->page_size,
2742                                                      &rmem->dma_arr[i],
2743                                                      GFP_KERNEL);
2744                 if (!rmem->pg_arr[i])
2745                         return -ENOMEM;
2746
2747                 if (rmem->init_val)
2748                         memset(rmem->pg_arr[i], rmem->init_val,
2749                                rmem->page_size);
2750                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2751                         if (i == rmem->nr_pages - 2 &&
2752                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2753                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2754                         else if (i == rmem->nr_pages - 1 &&
2755                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2756                                 extra_bits |= PTU_PTE_LAST;
2757                         rmem->pg_tbl[i] =
2758                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2759                 }
2760         }
2761
2762         if (rmem->vmem_size) {
2763                 *rmem->vmem = vzalloc(rmem->vmem_size);
2764                 if (!(*rmem->vmem))
2765                         return -ENOMEM;
2766         }
2767         return 0;
2768 }
2769
2770 static void bnxt_free_tpa_info(struct bnxt *bp)
2771 {
2772         int i;
2773
2774         for (i = 0; i < bp->rx_nr_rings; i++) {
2775                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2776
2777                 kfree(rxr->rx_tpa_idx_map);
2778                 rxr->rx_tpa_idx_map = NULL;
2779                 if (rxr->rx_tpa) {
2780                         kfree(rxr->rx_tpa[0].agg_arr);
2781                         rxr->rx_tpa[0].agg_arr = NULL;
2782                 }
2783                 kfree(rxr->rx_tpa);
2784                 rxr->rx_tpa = NULL;
2785         }
2786 }
2787
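/* Allocate per-ring TPA state.  On P5 chips, one rx_agg_cmp array of
 * max_tpa * MAX_SKB_FRAGS entries is allocated per ring and carved into
 * per-TPA slices; slice 0 owns the allocation, which is why
 * bnxt_free_tpa_info() above frees agg_arr only through rx_tpa[0].
 */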
2788 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2789 {
2790         int i, j, total_aggs = 0;
2791
2792         bp->max_tpa = MAX_TPA;
2793         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2794                 if (!bp->max_tpa_v2)
2795                         return 0;
2796                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2797                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2798         }
2799
2800         for (i = 0; i < bp->rx_nr_rings; i++) {
2801                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2802                 struct rx_agg_cmp *agg;
2803
2804                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2805                                       GFP_KERNEL);
2806                 if (!rxr->rx_tpa)
2807                         return -ENOMEM;
2808
2809                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2810                         continue;
2811                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2812                 rxr->rx_tpa[0].agg_arr = agg;
2813                 if (!agg)
2814                         return -ENOMEM;
2815                 for (j = 1; j < bp->max_tpa; j++)
2816                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2817                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2818                                               GFP_KERNEL);
2819                 if (!rxr->rx_tpa_idx_map)
2820                         return -ENOMEM;
2821         }
2822         return 0;
2823 }
2824
2825 static void bnxt_free_rx_rings(struct bnxt *bp)
2826 {
2827         int i;
2828
2829         if (!bp->rx_ring)
2830                 return;
2831
2832         bnxt_free_tpa_info(bp);
2833         for (i = 0; i < bp->rx_nr_rings; i++) {
2834                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2835                 struct bnxt_ring_struct *ring;
2836
2837                 if (rxr->xdp_prog)
2838                         bpf_prog_put(rxr->xdp_prog);
2839
2840                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2841                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2842
2843                 page_pool_destroy(rxr->page_pool);
2844                 rxr->page_pool = NULL;
2845
2846                 kfree(rxr->rx_agg_bmap);
2847                 rxr->rx_agg_bmap = NULL;
2848
2849                 ring = &rxr->rx_ring_struct;
2850                 bnxt_free_ring(bp, &ring->ring_mem);
2851
2852                 ring = &rxr->rx_agg_ring_struct;
2853                 bnxt_free_ring(bp, &ring->ring_mem);
2854         }
2855 }
2856
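/* Create a page_pool sized to the RX ring on the device's NUMA node.
 * DMA_BIDIRECTIONAL is used because pages may be written by XDP as
 * well as by the NIC.  No PP_FLAG_DMA_MAP is requested, so the driver
 * maps and unmaps the pages itself (see bnxt_free_one_rx_ring_skbs()).
 */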
2857 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2858                                    struct bnxt_rx_ring_info *rxr)
2859 {
2860         struct page_pool_params pp = { 0 };
2861
2862         pp.pool_size = bp->rx_ring_size;
2863         pp.nid = dev_to_node(&bp->pdev->dev);
2864         pp.dev = &bp->pdev->dev;
2865         pp.dma_dir = DMA_BIDIRECTIONAL;
2866
2867         rxr->page_pool = page_pool_create(&pp);
2868         if (IS_ERR(rxr->page_pool)) {
2869                 int err = PTR_ERR(rxr->page_pool);
2870
2871                 rxr->page_pool = NULL;
2872                 return err;
2873         }
2874         return 0;
2875 }
2876
2877 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2878 {
2879         int i, rc = 0, agg_rings = 0;
2880
2881         if (!bp->rx_ring)
2882                 return -ENOMEM;
2883
2884         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2885                 agg_rings = 1;
2886
2887         for (i = 0; i < bp->rx_nr_rings; i++) {
2888                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2889                 struct bnxt_ring_struct *ring;
2890
2891                 ring = &rxr->rx_ring_struct;
2892
2893                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2894                 if (rc)
2895                         return rc;
2896
2897                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2898                 if (rc < 0)
2899                         return rc;
2900
2901                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2902                                                 MEM_TYPE_PAGE_POOL,
2903                                                 rxr->page_pool);
2904                 if (rc) {
2905                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2906                         return rc;
2907                 }
2908
2909                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2910                 if (rc)
2911                         return rc;
2912
2913                 ring->grp_idx = i;
2914                 if (agg_rings) {
2915                         u16 mem_size;
2916
2917                         ring = &rxr->rx_agg_ring_struct;
2918                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2919                         if (rc)
2920                                 return rc;
2921
2922                         ring->grp_idx = i;
2923                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2924                         mem_size = rxr->rx_agg_bmap_size / 8;
2925                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2926                         if (!rxr->rx_agg_bmap)
2927                                 return -ENOMEM;
2928                 }
2929         }
2930         if (bp->flags & BNXT_FLAG_TPA)
2931                 rc = bnxt_alloc_tpa_info(bp);
2932         return rc;
2933 }
2934
2935 static void bnxt_free_tx_rings(struct bnxt *bp)
2936 {
2937         int i;
2938         struct pci_dev *pdev = bp->pdev;
2939
2940         if (!bp->tx_ring)
2941                 return;
2942
2943         for (i = 0; i < bp->tx_nr_rings; i++) {
2944                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2945                 struct bnxt_ring_struct *ring;
2946
2947                 if (txr->tx_push) {
2948                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2949                                           txr->tx_push, txr->tx_push_mapping);
2950                         txr->tx_push = NULL;
2951                 }
2952
2953                 ring = &txr->tx_ring_struct;
2954
2955                 bnxt_free_ring(bp, &ring->ring_mem);
2956         }
2957 }
2958
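/* Allocate TX descriptor rings plus the optional "TX push" buffers
 * used to inline small packets into the doorbell BAR.  As a rough
 * example, with the default 164-byte push threshold, push_size works
 * out to L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 164); any value
 * above 256 bytes disables push mode entirely.
 */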
2959 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2960 {
2961         int i, j, rc;
2962         struct pci_dev *pdev = bp->pdev;
2963
2964         bp->tx_push_size = 0;
2965         if (bp->tx_push_thresh) {
2966                 int push_size;
2967
2968                 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2969                                            bp->tx_push_thresh);
2970
2971                 if (push_size > 256) {
2972                         push_size = 0;
2973                         bp->tx_push_thresh = 0;
2974                 }
2975
2976                 bp->tx_push_size = push_size;
2977         }
2978
2979         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2980                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2981                 struct bnxt_ring_struct *ring;
2982                 u8 qidx;
2983
2984                 ring = &txr->tx_ring_struct;
2985
2986                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2987                 if (rc)
2988                         return rc;
2989
2990                 ring->grp_idx = txr->bnapi->index;
2991                 if (bp->tx_push_size) {
2992                         dma_addr_t mapping;
2993
2994                         /* One pre-allocated DMA buffer to back up
2995                          * the TX push operation
2996                          */
2997                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2998                                                 bp->tx_push_size,
2999                                                 &txr->tx_push_mapping,
3000                                                 GFP_KERNEL);
3001
3002                         if (!txr->tx_push)
3003                                 return -ENOMEM;
3004
3005                         mapping = txr->tx_push_mapping +
3006                                 sizeof(struct tx_push_bd);
3007                         txr->data_mapping = cpu_to_le64(mapping);
3008                 }
3009                 qidx = bp->tc_to_qidx[j];
3010                 ring->queue_id = bp->q_info[qidx].queue_id;
3011                 if (i < bp->tx_nr_rings_xdp)
3012                         continue;
3013                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3014                         j++;
3015         }
3016         return 0;
3017 }
3018
3019 static void bnxt_free_cp_rings(struct bnxt *bp)
3020 {
3021         int i;
3022
3023         if (!bp->bnapi)
3024                 return;
3025
3026         for (i = 0; i < bp->cp_nr_rings; i++) {
3027                 struct bnxt_napi *bnapi = bp->bnapi[i];
3028                 struct bnxt_cp_ring_info *cpr;
3029                 struct bnxt_ring_struct *ring;
3030                 int j;
3031
3032                 if (!bnapi)
3033                         continue;
3034
3035                 cpr = &bnapi->cp_ring;
3036                 ring = &cpr->cp_ring_struct;
3037
3038                 bnxt_free_ring(bp, &ring->ring_mem);
3039
3040                 for (j = 0; j < 2; j++) {
3041                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3042
3043                         if (cpr2) {
3044                                 ring = &cpr2->cp_ring_struct;
3045                                 bnxt_free_ring(bp, &ring->ring_mem);
3046                                 kfree(cpr2);
3047                                 cpr->cp_ring_arr[j] = NULL;
3048                         }
3049                 }
3050         }
3051 }
3052
3053 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3054 {
3055         struct bnxt_ring_mem_info *rmem;
3056         struct bnxt_ring_struct *ring;
3057         struct bnxt_cp_ring_info *cpr;
3058         int rc;
3059
3060         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3061         if (!cpr)
3062                 return NULL;
3063
3064         ring = &cpr->cp_ring_struct;
3065         rmem = &ring->ring_mem;
3066         rmem->nr_pages = bp->cp_nr_pages;
3067         rmem->page_size = HW_CMPD_RING_SIZE;
3068         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3069         rmem->dma_arr = cpr->cp_desc_mapping;
3070         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3071         rc = bnxt_alloc_ring(bp, rmem);
3072         if (rc) {
3073                 bnxt_free_ring(bp, rmem);
3074                 kfree(cpr);
3075                 cpr = NULL;
3076         }
3077         return cpr;
3078 }
3079
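/* Allocate completion rings.  On P5 chips, the per-NAPI cp_ring acts
 * as the notification queue (NQ) for its MSI-X vector, with up to two
 * hardware completion sub-rings hanging off it: cp_ring_arr[BNXT_RX_HDL]
 * for RX and cp_ring_arr[BNXT_TX_HDL] for TX (a vector gets both when
 * RX and TX share rings).
 */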
3080 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3081 {
3082         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3083         int i, rc, ulp_base_vec, ulp_msix;
3084
3085         ulp_msix = bnxt_get_ulp_msix_num(bp);
3086         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3087         for (i = 0; i < bp->cp_nr_rings; i++) {
3088                 struct bnxt_napi *bnapi = bp->bnapi[i];
3089                 struct bnxt_cp_ring_info *cpr;
3090                 struct bnxt_ring_struct *ring;
3091
3092                 if (!bnapi)
3093                         continue;
3094
3095                 cpr = &bnapi->cp_ring;
3096                 cpr->bnapi = bnapi;
3097                 ring = &cpr->cp_ring_struct;
3098
3099                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3100                 if (rc)
3101                         return rc;
3102
3103                 if (ulp_msix && i >= ulp_base_vec)
3104                         ring->map_idx = i + ulp_msix;
3105                 else
3106                         ring->map_idx = i;
3107
3108                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3109                         continue;
3110
3111                 if (i < bp->rx_nr_rings) {
3112                         struct bnxt_cp_ring_info *cpr2 =
3113                                 bnxt_alloc_cp_sub_ring(bp);
3114
3115                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3116                         if (!cpr2)
3117                                 return -ENOMEM;
3118                         cpr2->bnapi = bnapi;
3119                 }
3120                 if ((sh && i < bp->tx_nr_rings) ||
3121                     (!sh && i >= bp->rx_nr_rings)) {
3122                         struct bnxt_cp_ring_info *cpr2 =
3123                                 bnxt_alloc_cp_sub_ring(bp);
3124
3125                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3126                         if (!cpr2)
3127                                 return -ENOMEM;
3128                         cpr2->bnapi = bnapi;
3129                 }
3130         }
3131         return 0;
3132 }
3133
3134 static void bnxt_init_ring_struct(struct bnxt *bp)
3135 {
3136         int i;
3137
3138         for (i = 0; i < bp->cp_nr_rings; i++) {
3139                 struct bnxt_napi *bnapi = bp->bnapi[i];
3140                 struct bnxt_ring_mem_info *rmem;
3141                 struct bnxt_cp_ring_info *cpr;
3142                 struct bnxt_rx_ring_info *rxr;
3143                 struct bnxt_tx_ring_info *txr;
3144                 struct bnxt_ring_struct *ring;
3145
3146                 if (!bnapi)
3147                         continue;
3148
3149                 cpr = &bnapi->cp_ring;
3150                 ring = &cpr->cp_ring_struct;
3151                 rmem = &ring->ring_mem;
3152                 rmem->nr_pages = bp->cp_nr_pages;
3153                 rmem->page_size = HW_CMPD_RING_SIZE;
3154                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3155                 rmem->dma_arr = cpr->cp_desc_mapping;
3156                 rmem->vmem_size = 0;
3157
3158                 rxr = bnapi->rx_ring;
3159                 if (!rxr)
3160                         goto skip_rx;
3161
3162                 ring = &rxr->rx_ring_struct;
3163                 rmem = &ring->ring_mem;
3164                 rmem->nr_pages = bp->rx_nr_pages;
3165                 rmem->page_size = HW_RXBD_RING_SIZE;
3166                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3167                 rmem->dma_arr = rxr->rx_desc_mapping;
3168                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3169                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3170
3171                 ring = &rxr->rx_agg_ring_struct;
3172                 rmem = &ring->ring_mem;
3173                 rmem->nr_pages = bp->rx_agg_nr_pages;
3174                 rmem->page_size = HW_RXBD_RING_SIZE;
3175                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3176                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3177                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3178                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3179
3180 skip_rx:
3181                 txr = bnapi->tx_ring;
3182                 if (!txr)
3183                         continue;
3184
3185                 ring = &txr->tx_ring_struct;
3186                 rmem = &ring->ring_mem;
3187                 rmem->nr_pages = bp->tx_nr_pages;
3188                 rmem->page_size = HW_TXBD_RING_SIZE;
3189                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3190                 rmem->dma_arr = txr->tx_desc_mapping;
3191                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3192                 rmem->vmem = (void **)&txr->tx_buf_ring;
3193         }
3194 }
3195
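/* Write the given len/flags/type word into every RX buffer descriptor
 * on the ring's pages.  The opaque field is set to the descriptor's
 * producer index so that an RX completion can find the matching
 * software buffer.
 */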
3196 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3197 {
3198         int i;
3199         u32 prod;
3200         struct rx_bd **rx_buf_ring;
3201
3202         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3203         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3204                 int j;
3205                 struct rx_bd *rxbd;
3206
3207                 rxbd = rx_buf_ring[i];
3208                 if (!rxbd)
3209                         continue;
3210
3211                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3212                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3213                         rxbd->rx_bd_opaque = prod;
3214                 }
3215         }
3216 }
3217
3218 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3219 {
3220         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3221         struct net_device *dev = bp->dev;
3222         u32 prod;
3223         int i;
3224
3225         prod = rxr->rx_prod;
3226         for (i = 0; i < bp->rx_ring_size; i++) {
3227                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3228                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3229                                     ring_nr, i, bp->rx_ring_size);
3230                         break;
3231                 }
3232                 prod = NEXT_RX(prod);
3233         }
3234         rxr->rx_prod = prod;
3235
3236         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3237                 return 0;
3238
3239         prod = rxr->rx_agg_prod;
3240         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3241                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3242                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3243                                     ring_nr, i, bp->rx_ring_size);
3244                                     ring_nr, i, bp->rx_agg_ring_size);
3245                 }
3246                 prod = NEXT_RX_AGG(prod);
3247         }
3248         rxr->rx_agg_prod = prod;
3249
3250         if (rxr->rx_tpa) {
3251                 dma_addr_t mapping;
3252                 u8 *data;
3253
3254                 for (i = 0; i < bp->max_tpa; i++) {
3255                         data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3256                         if (!data)
3257                                 return -ENOMEM;
3258
3259                         rxr->rx_tpa[i].data = data;
3260                         rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3261                         rxr->rx_tpa[i].mapping = mapping;
3262                 }
3263         }
3264         return 0;
3265 }
3266
3267 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3268 {
3269         struct bnxt_rx_ring_info *rxr;
3270         struct bnxt_ring_struct *ring;
3271         u32 type;
3272
3273         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3274                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3275
3276         if (NET_IP_ALIGN == 2)
3277                 type |= RX_BD_FLAGS_SOP;
3278
3279         rxr = &bp->rx_ring[ring_nr];
3280         ring = &rxr->rx_ring_struct;
3281         bnxt_init_rxbd_pages(ring, type);
3282
3283         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3284                 bpf_prog_add(bp->xdp_prog, 1);
3285                 rxr->xdp_prog = bp->xdp_prog;
3286         }
3287         ring->fw_ring_id = INVALID_HW_RING_ID;
3288
3289         ring = &rxr->rx_agg_ring_struct;
3290         ring->fw_ring_id = INVALID_HW_RING_ID;
3291
3292         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3293                 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3294                         RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3295
3296                 bnxt_init_rxbd_pages(ring, type);
3297         }
3298
3299         return bnxt_alloc_one_rx_ring(bp, ring_nr);
3300 }
3301
3302 static void bnxt_init_cp_rings(struct bnxt *bp)
3303 {
3304         int i, j;
3305
3306         for (i = 0; i < bp->cp_nr_rings; i++) {
3307                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3308                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3309
3310                 ring->fw_ring_id = INVALID_HW_RING_ID;
3311                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3312                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3313                 for (j = 0; j < 2; j++) {
3314                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3315
3316                         if (!cpr2)
3317                                 continue;
3318
3319                         ring = &cpr2->cp_ring_struct;
3320                         ring->fw_ring_id = INVALID_HW_RING_ID;
3321                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3322                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3323                 }
3324         }
3325 }
3326
3327 static int bnxt_init_rx_rings(struct bnxt *bp)
3328 {
3329         int i, rc = 0;
3330
3331         if (BNXT_RX_PAGE_MODE(bp)) {
3332                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3333                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3334         } else {
3335                 bp->rx_offset = BNXT_RX_OFFSET;
3336                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3337         }
3338
3339         for (i = 0; i < bp->rx_nr_rings; i++) {
3340                 rc = bnxt_init_one_rx_ring(bp, i);
3341                 if (rc)
3342                         break;
3343         }
3344
3345         return rc;
3346 }
3347
3348 static int bnxt_init_tx_rings(struct bnxt *bp)
3349 {
3350         u16 i;
3351
3352         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3353                                    MAX_SKB_FRAGS + 1);
3354
3355         for (i = 0; i < bp->tx_nr_rings; i++) {
3356                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3357                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3358
3359                 ring->fw_ring_id = INVALID_HW_RING_ID;
3360         }
3361
3362         return 0;
3363 }
3364
3365 static void bnxt_free_ring_grps(struct bnxt *bp)
3366 {
3367         kfree(bp->grp_info);
3368         bp->grp_info = NULL;
3369 }
3370
3371 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3372 {
3373         int i;
3374
3375         if (irq_re_init) {
3376                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3377                                        sizeof(struct bnxt_ring_grp_info),
3378                                        GFP_KERNEL);
3379                 if (!bp->grp_info)
3380                         return -ENOMEM;
3381         }
3382         for (i = 0; i < bp->cp_nr_rings; i++) {
3383                 if (irq_re_init)
3384                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3385                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3386                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3387                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3388                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3389         }
3390         return 0;
3391 }
3392
3393 static void bnxt_free_vnics(struct bnxt *bp)
3394 {
3395         kfree(bp->vnic_info);
3396         bp->vnic_info = NULL;
3397         bp->nr_vnics = 0;
3398 }
3399
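/* Allocate the VNIC table.  VNIC 0 is the default; with RFS on pre-P5
 * chips, one extra VNIC is reserved per RX ring so each ring can be an
 * aRFS steering target, and Nitro A0 chips need one more.
 */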
3400 static int bnxt_alloc_vnics(struct bnxt *bp)
3401 {
3402         int num_vnics = 1;
3403
3404 #ifdef CONFIG_RFS_ACCEL
3405         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3406                 num_vnics += bp->rx_nr_rings;
3407 #endif
3408
3409         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3410                 num_vnics++;
3411
3412         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3413                                 GFP_KERNEL);
3414         if (!bp->vnic_info)
3415                 return -ENOMEM;
3416
3417         bp->nr_vnics = num_vnics;
3418         return 0;
3419 }
3420
3421 static void bnxt_init_vnics(struct bnxt *bp)
3422 {
3423         int i;
3424
3425         for (i = 0; i < bp->nr_vnics; i++) {
3426                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3427                 int j;
3428
3429                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3430                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3431                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3432
3433                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3434
3435                 if (vnic->rss_hash_key) {
3436                         if (i == 0)
3437                                 prandom_bytes(vnic->rss_hash_key,
3438                                               HW_HASH_KEY_SIZE);
3439                         else
3440                                 memcpy(vnic->rss_hash_key,
3441                                        bp->vnic_info[0].rss_hash_key,
3442                                        HW_HASH_KEY_SIZE);
3443                 }
3444         }
3445 }
3446
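/* Return the number of descriptor pages needed for @ring_size entries,
 * rounded up to a power of two as the ring masks require.  E.g.,
 * assuming 64 descriptors per page, a ring_size of 2047 gives
 * 2047 / 64 = 31, the increment makes it 32 (already a power of two),
 * and 32 pages hold the 2048 descriptors.
 */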
3447 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3448 {
3449         int pages;
3450
3451         pages = ring_size / desc_per_pg;
3452
3453         if (!pages)
3454                 return 1;
3455
3456         pages++;
3457
3458         while (pages & (pages - 1))
3459                 pages++;
3460
3461         return pages;
3462 }
3463
3464 void bnxt_set_tpa_flags(struct bnxt *bp)
3465 {
3466         bp->flags &= ~BNXT_FLAG_TPA;
3467         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3468                 return;
3469         if (bp->dev->features & NETIF_F_LRO)
3470                 bp->flags |= BNXT_FLAG_LRO;
3471         else if (bp->dev->features & NETIF_F_GRO_HW)
3472                 bp->flags |= BNXT_FLAG_GRO;
3473 }
3474
3475 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3476  * be set on entry.
3477  */
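/* Example, assuming a 2048-entry RX ring with TPA enabled: RX and TPA
 * completions occupy two 16-byte units each, so the completion ring is
 * sized for (2048 + max_tpa) * 2 units plus one unit per aggregation
 * and TX descriptor, then rounded up to whole pages.
 */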
3478 void bnxt_set_ring_params(struct bnxt *bp)
3479 {
3480         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3481         u32 agg_factor = 0, agg_ring_size = 0;
3482
3483         /* 8 for CRC and VLAN */
3484         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3485
3486         rx_space = rx_size + NET_SKB_PAD +
3487                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3488
3489         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3490         ring_size = bp->rx_ring_size;
3491         bp->rx_agg_ring_size = 0;
3492         bp->rx_agg_nr_pages = 0;
3493
3494         if (bp->flags & BNXT_FLAG_TPA)
3495                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3496
3497         bp->flags &= ~BNXT_FLAG_JUMBO;
3498         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3499                 u32 jumbo_factor;
3500
3501                 bp->flags |= BNXT_FLAG_JUMBO;
3502                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3503                 if (jumbo_factor > agg_factor)
3504                         agg_factor = jumbo_factor;
3505         }
3506         agg_ring_size = ring_size * agg_factor;
3507
3508         if (agg_ring_size) {
3509                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3510                                                         RX_DESC_CNT);
3511                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3512                         u32 tmp = agg_ring_size;
3513
3514                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3515                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3516                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3517                                     tmp, agg_ring_size);
3518                 }
3519                 bp->rx_agg_ring_size = agg_ring_size;
3520                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3521                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3522                 rx_space = rx_size + NET_SKB_PAD +
3523                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3524         }
3525
3526         bp->rx_buf_use_size = rx_size;
3527         bp->rx_buf_size = rx_space;
3528
3529         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3530         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3531
3532         ring_size = bp->tx_ring_size;
3533         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3534         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3535
3536         max_rx_cmpl = bp->rx_ring_size;
3537         /* MAX TPA needs to be added because TPA_START completions are
3538          * immediately recycled, so the TPA completions are not bound by
3539          * the RX ring size.
3540          */
3541         if (bp->flags & BNXT_FLAG_TPA)
3542                 max_rx_cmpl += bp->max_tpa;
3543         /* RX and TPA completions are 32-byte, all others are 16-byte */
3544         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3545         bp->cp_ring_size = ring_size;
3546
3547         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3548         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3549                 bp->cp_nr_pages = MAX_CP_PAGES;
3550                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3551                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3552                             ring_size, bp->cp_ring_size);
3553         }
3554         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3555         bp->cp_ring_mask = bp->cp_bit - 1;
3556 }
3557
3558 /* Changing allocation mode of RX rings.
3559  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3560  */
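/* page_mode == true is used when an XDP program is attached: RX
 * buffers become single DMA_BIDIRECTIONAL pages, aggregation rings are
 * disabled (so LRO/GRO_HW go away) and the MTU is capped at
 * BNXT_MAX_PAGE_MODE_MTU.  page_mode == false restores the normal
 * kmalloc'ed SKB buffers.
 */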
3561 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3562 {
3563         if (page_mode) {
3564                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3565                         return -EOPNOTSUPP;
3566                 bp->dev->max_mtu =
3567                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3568                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3569                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3570                 bp->rx_dir = DMA_BIDIRECTIONAL;
3571                 bp->rx_skb_func = bnxt_rx_page_skb;
3572                 /* Disable LRO or GRO_HW */
3573                 netdev_update_features(bp->dev);
3574         } else {
3575                 bp->dev->max_mtu = bp->max_mtu;
3576                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3577                 bp->rx_dir = DMA_FROM_DEVICE;
3578                 bp->rx_skb_func = bnxt_rx_skb;
3579         }
3580         return 0;
3581 }
3582
3583 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3584 {
3585         int i;
3586         struct bnxt_vnic_info *vnic;
3587         struct pci_dev *pdev = bp->pdev;
3588
3589         if (!bp->vnic_info)
3590                 return;
3591
3592         for (i = 0; i < bp->nr_vnics; i++) {
3593                 vnic = &bp->vnic_info[i];
3594
3595                 kfree(vnic->fw_grp_ids);
3596                 vnic->fw_grp_ids = NULL;
3597
3598                 kfree(vnic->uc_list);
3599                 vnic->uc_list = NULL;
3600
3601                 if (vnic->mc_list) {
3602                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3603                                           vnic->mc_list, vnic->mc_list_mapping);
3604                         vnic->mc_list = NULL;
3605                 }
3606
3607                 if (vnic->rss_table) {
3608                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3609                                           vnic->rss_table,
3610                                           vnic->rss_table_dma_addr);
3611                         vnic->rss_table = NULL;
3612                 }
3613
3614                 vnic->rss_hash_key = NULL;
3615                 vnic->flags = 0;
3616         }
3617 }
3618
3619 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3620 {
3621         int i, rc = 0, size;
3622         struct bnxt_vnic_info *vnic;
3623         struct pci_dev *pdev = bp->pdev;
3624         int max_rings;
3625
3626         for (i = 0; i < bp->nr_vnics; i++) {
3627                 vnic = &bp->vnic_info[i];
3628
3629                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3630                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3631
3632                         if (mem_size > 0) {
3633                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3634                                 if (!vnic->uc_list) {
3635                                         rc = -ENOMEM;
3636                                         goto out;
3637                                 }
3638                         }
3639                 }
3640
3641                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3642                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3643                         vnic->mc_list =
3644                                 dma_alloc_coherent(&pdev->dev,
3645                                                    vnic->mc_list_size,
3646                                                    &vnic->mc_list_mapping,
3647                                                    GFP_KERNEL);
3648                         if (!vnic->mc_list) {
3649                                 rc = -ENOMEM;
3650                                 goto out;
3651                         }
3652                 }
3653
3654                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3655                         goto vnic_skip_grps;
3656
3657                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3658                         max_rings = bp->rx_nr_rings;
3659                 else
3660                         max_rings = 1;
3661
3662                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3663                 if (!vnic->fw_grp_ids) {
3664                         rc = -ENOMEM;
3665                         goto out;
3666                 }
3667 vnic_skip_grps:
3668                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3669                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3670                         continue;
3671
3672                 /* Allocate rss table and hash key */
3673                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3674                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3675                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3676
3677                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3678                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3679                                                      vnic->rss_table_size,
3680                                                      &vnic->rss_table_dma_addr,
3681                                                      GFP_KERNEL);
3682                 if (!vnic->rss_table) {
3683                         rc = -ENOMEM;
3684                         goto out;
3685                 }
3686
3687                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3688                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3689         }
3690         return 0;
3691
3692 out:
3693         return rc;
3694 }
3695
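/* HWRM (Hardware Resource Manager) buffers.  Firmware DMAs each
 * command response into a PAGE_SIZE coherent buffer owned by the
 * driver; the "kong" buffer serves the same purpose for the secondary
 * KONG firmware channel on chips that provide one.
 */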
3696 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3697 {
3698         struct pci_dev *pdev = bp->pdev;
3699
3700         if (bp->hwrm_cmd_resp_addr) {
3701                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3702                                   bp->hwrm_cmd_resp_dma_addr);
3703                 bp->hwrm_cmd_resp_addr = NULL;
3704         }
3705
3706         if (bp->hwrm_cmd_kong_resp_addr) {
3707                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3708                                   bp->hwrm_cmd_kong_resp_addr,
3709                                   bp->hwrm_cmd_kong_resp_dma_addr);
3710                 bp->hwrm_cmd_kong_resp_addr = NULL;
3711         }
3712 }
3713
3714 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3715 {
3716         struct pci_dev *pdev = bp->pdev;
3717
3718         if (bp->hwrm_cmd_kong_resp_addr)
3719                 return 0;
3720
3721         bp->hwrm_cmd_kong_resp_addr =
3722                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3723                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3724                                    GFP_KERNEL);
3725         if (!bp->hwrm_cmd_kong_resp_addr)
3726                 return -ENOMEM;
3727
3728         return 0;
3729 }
3730
3731 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3732 {
3733         struct pci_dev *pdev = bp->pdev;
3734
3735         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3736                                                    &bp->hwrm_cmd_resp_dma_addr,
3737                                                    GFP_KERNEL);
3738         if (!bp->hwrm_cmd_resp_addr)
3739                 return -ENOMEM;
3740
3741         return 0;
3742 }
3743
3744 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3745 {
3746         if (bp->hwrm_short_cmd_req_addr) {
3747                 struct pci_dev *pdev = bp->pdev;
3748
3749                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3750                                   bp->hwrm_short_cmd_req_addr,
3751                                   bp->hwrm_short_cmd_req_dma_addr);
3752                 bp->hwrm_short_cmd_req_addr = NULL;
3753         }
3754 }
3755
3756 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3757 {
3758         struct pci_dev *pdev = bp->pdev;
3759
3760         if (bp->hwrm_short_cmd_req_addr)
3761                 return 0;
3762
3763         bp->hwrm_short_cmd_req_addr =
3764                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3765                                    &bp->hwrm_short_cmd_req_dma_addr,
3766                                    GFP_KERNEL);
3767         if (!bp->hwrm_short_cmd_req_addr)
3768                 return -ENOMEM;
3769
3770         return 0;
3771 }
3772
3773 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3774 {
3775         kfree(stats->hw_masks);
3776         stats->hw_masks = NULL;
3777         kfree(stats->sw_stats);
3778         stats->sw_stats = NULL;
3779         if (stats->hw_stats) {
3780                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3781                                   stats->hw_stats_map);
3782                 stats->hw_stats = NULL;
3783         }
3784 }
3785
3786 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3787                                 bool alloc_masks)
3788 {
3789         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3790                                              &stats->hw_stats_map, GFP_KERNEL);
3791         if (!stats->hw_stats)
3792                 return -ENOMEM;
3793
3794         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3795         if (!stats->sw_stats)
3796                 goto stats_mem_err;
3797
3798         if (alloc_masks) {
3799                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3800                 if (!stats->hw_masks)
3801                         goto stats_mem_err;
3802         }
3803         return 0;
3804
3805 stats_mem_err:
3806         bnxt_free_stats_mem(bp, stats);
3807         return -ENOMEM;
3808 }
3809
3810 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3811 {
3812         int i;
3813
3814         for (i = 0; i < count; i++)
3815                 mask_arr[i] = mask;
3816 }
3817
3818 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3819 {
3820         int i;
3821
3822         for (i = 0; i < count; i++)
3823                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3824 }
3825
3826 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3827                                     struct bnxt_stats_mem *stats)
3828 {
3829         struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3830         struct hwrm_func_qstats_ext_input req = {0};
3831         __le64 *hw_masks;
3832         int rc;
3833
3834         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3835             !(bp->flags & BNXT_FLAG_CHIP_P5))
3836                 return -EOPNOTSUPP;
3837
3838         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3839         req.fid = cpu_to_le16(0xffff);
3840         req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3841         mutex_lock(&bp->hwrm_cmd_lock);
3842         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3843         if (rc)
3844                 goto qstat_exit;
3845
3846         hw_masks = &resp->rx_ucast_pkts;
3847         bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3848
3849 qstat_exit:
3850         mutex_unlock(&bp->hwrm_cmd_lock);
3851         return rc;
3852 }
3853
3854 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3855 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3856
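/* Seed the counter-mask arrays used to handle hardware counter wrap.
 * Masks are queried from firmware where supported; otherwise fall back
 * to the known widths: 48-bit ring counters on P5 chips (full 64-bit
 * on older chips) and 40-bit port counters.
 */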
3857 static void bnxt_init_stats(struct bnxt *bp)
3858 {
3859         struct bnxt_napi *bnapi = bp->bnapi[0];
3860         struct bnxt_cp_ring_info *cpr;
3861         struct bnxt_stats_mem *stats;
3862         __le64 *rx_stats, *tx_stats;
3863         int rc, rx_count, tx_count;
3864         u64 *rx_masks, *tx_masks;
3865         u64 mask;
3866         u8 flags;
3867
3868         cpr = &bnapi->cp_ring;
3869         stats = &cpr->stats;
3870         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3871         if (rc) {
3872                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3873                         mask = (1ULL << 48) - 1;
3874                 else
3875                         mask = -1ULL;
3876                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
3877         }
3878         if (bp->flags & BNXT_FLAG_PORT_STATS) {
3879                 stats = &bp->port_stats;
3880                 rx_stats = stats->hw_stats;
3881                 rx_masks = stats->hw_masks;
3882                 rx_count = sizeof(struct rx_port_stats) / 8;
3883                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3884                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3885                 tx_count = sizeof(struct tx_port_stats) / 8;
3886
3887                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
3888                 rc = bnxt_hwrm_port_qstats(bp, flags);
3889                 if (rc) {
3890                         mask = (1ULL << 40) - 1;
3891
3892                         bnxt_fill_masks(rx_masks, mask, rx_count);
3893                         bnxt_fill_masks(tx_masks, mask, tx_count);
3894                 } else {
3895                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3896                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
3897                         bnxt_hwrm_port_qstats(bp, 0);
3898                 }
3899         }
3900         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
3901                 stats = &bp->rx_port_stats_ext;
3902                 rx_stats = stats->hw_stats;
3903                 rx_masks = stats->hw_masks;
3904                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
3905                 stats = &bp->tx_port_stats_ext;
3906                 tx_stats = stats->hw_stats;
3907                 tx_masks = stats->hw_masks;
3908                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
3909
3910                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3911                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
3912                 if (rc) {
3913                         mask = (1ULL << 40) - 1;
3914
3915                         bnxt_fill_masks(rx_masks, mask, rx_count);
3916                         if (tx_stats)
3917                                 bnxt_fill_masks(tx_masks, mask, tx_count);
3918                 } else {
3919                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3920                         if (tx_stats)
3921                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
3922                                                    tx_count);
3923                         bnxt_hwrm_port_qstats_ext(bp, 0);
3924                 }
3925         }
3926 }
3927
3928 static void bnxt_free_port_stats(struct bnxt *bp)
3929 {
3930         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3931         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3932
3933         bnxt_free_stats_mem(bp, &bp->port_stats);
3934         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
3935         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
3936 }
3937
3938 static void bnxt_free_ring_stats(struct bnxt *bp)
3939 {
3940         int i;
3941
3942         if (!bp->bnapi)
3943                 return;
3944
3945         for (i = 0; i < bp->cp_nr_rings; i++) {
3946                 struct bnxt_napi *bnapi = bp->bnapi[i];
3947                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3948
3949                 bnxt_free_stats_mem(bp, &cpr->stats);
3950         }
3951 }
3952
3953 static int bnxt_alloc_stats(struct bnxt *bp)
3954 {
3955         u32 size, i;
3956         int rc;
3957
3958         size = bp->hw_ring_stats_size;
3959
3960         for (i = 0; i < bp->cp_nr_rings; i++) {
3961                 struct bnxt_napi *bnapi = bp->bnapi[i];
3962                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3963
3964                 cpr->stats.len = size;
3965                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
3966                 if (rc)
3967                         return rc;
3968
3969                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3970         }
3971
3972         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3973                 return 0;
3974
3975         if (bp->port_stats.hw_stats)
3976                 goto alloc_ext_stats;
3977
3978         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
3979         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
3980         if (rc)
3981                 return rc;
3982
3983         bp->flags |= BNXT_FLAG_PORT_STATS;
3984
3985 alloc_ext_stats:
3986         /* Display extended statistics only if FW supports it */
3987         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3988                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3989                         return 0;
3990
3991         if (bp->rx_port_stats_ext.hw_stats)
3992                 goto alloc_tx_ext_stats;
3993
3994         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
3995         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
3996         /* Extended stats are optional */
3997         if (rc)
3998                 return 0;
3999
4000 alloc_tx_ext_stats:
4001         if (bp->tx_port_stats_ext.hw_stats)
4002                 return 0;
4003
4004         if (bp->hwrm_spec_code >= 0x10902 ||
4005             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4006                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4007                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4008                 /* Extended stats are optional */
4009                 if (rc)
4010                         return 0;
4011         }
4012         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4013         return 0;
4014 }
4015
4016 static void bnxt_clear_ring_indices(struct bnxt *bp)
4017 {
4018         int i;
4019
4020         if (!bp->bnapi)
4021                 return;
4022
4023         for (i = 0; i < bp->cp_nr_rings; i++) {
4024                 struct bnxt_napi *bnapi = bp->bnapi[i];
4025                 struct bnxt_cp_ring_info *cpr;
4026                 struct bnxt_rx_ring_info *rxr;
4027                 struct bnxt_tx_ring_info *txr;
4028
4029                 if (!bnapi)
4030                         continue;
4031
4032                 cpr = &bnapi->cp_ring;
4033                 cpr->cp_raw_cons = 0;
4034
4035                 txr = bnapi->tx_ring;
4036                 if (txr) {
4037                         txr->tx_prod = 0;
4038                         txr->tx_cons = 0;
4039                 }
4040
4041                 rxr = bnapi->rx_ring;
4042                 if (rxr) {
4043                         rxr->rx_prod = 0;
4044                         rxr->rx_agg_prod = 0;
4045                         rxr->rx_sw_agg_prod = 0;
4046                         rxr->rx_next_cons = 0;
4047                 }
4048         }
4049 }
4050
4051 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4052 {
4053 #ifdef CONFIG_RFS_ACCEL
4054         int i;
4055
4056         /* We are under rtnl_lock and all our NAPIs have been disabled.
4057          * It is now safe to delete the hash table.
4058          */
4059         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4060                 struct hlist_head *head;
4061                 struct hlist_node *tmp;
4062                 struct bnxt_ntuple_filter *fltr;
4063
4064                 head = &bp->ntp_fltr_hash_tbl[i];
4065                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4066                         hlist_del(&fltr->hash);
4067                         kfree(fltr);
4068                 }
4069         }
4070         if (irq_reinit) {
4071                 kfree(bp->ntp_fltr_bmap);
4072                 bp->ntp_fltr_bmap = NULL;
4073         }
4074         bp->ntp_fltr_count = 0;
4075 #endif
4076 }
4077
4078 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4079 {
4080 #ifdef CONFIG_RFS_ACCEL
4081         int i, rc = 0;
4082
4083         if (!(bp->flags & BNXT_FLAG_RFS))
4084                 return 0;
4085
4086         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4087                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4088
4089         bp->ntp_fltr_count = 0;
4090         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4091                                     sizeof(long),
4092                                     GFP_KERNEL);
4093
4094         if (!bp->ntp_fltr_bmap)
4095                 rc = -ENOMEM;
4096
4097         return rc;
4098 #else
4099         return 0;
4100 #endif
4101 }
4102
4103 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4104 {
4105         bnxt_free_vnic_attributes(bp);
4106         bnxt_free_tx_rings(bp);
4107         bnxt_free_rx_rings(bp);
4108         bnxt_free_cp_rings(bp);
4109         bnxt_free_ntp_fltrs(bp, irq_re_init);
4110         if (irq_re_init) {
4111                 bnxt_free_ring_stats(bp);
4112                 if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
4113                         bnxt_free_port_stats(bp);
4114                 bnxt_free_ring_grps(bp);
4115                 bnxt_free_vnics(bp);
4116                 kfree(bp->tx_ring_map);
4117                 bp->tx_ring_map = NULL;
4118                 kfree(bp->tx_ring);
4119                 bp->tx_ring = NULL;
4120                 kfree(bp->rx_ring);
4121                 bp->rx_ring = NULL;
4122                 kfree(bp->bnapi);
4123                 bp->bnapi = NULL;
4124         } else {
4125                 bnxt_clear_ring_indices(bp);
4126         }
4127 }
4128
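/* Allocate all per-queue software state.  The bnxt_napi structs come
 * from a single allocation laid out as
 *   [pointer array][napi 0][napi 1]...[napi cp_nr_rings - 1]
 * with each piece L1-cache aligned, followed by the RX/TX ring arrays
 * and the stats, ntuple-filter and VNIC state.
 */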
4129 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4130 {
4131         int i, j, rc, size, arr_size;
4132         void *bnapi;
4133
4134         if (irq_re_init) {
4135                 /* Allocate the bnapi pointer array and one memory
4136                  * block for all queues
4137                  */
4138                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4139                                 bp->cp_nr_rings);
4140                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4141                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4142                 if (!bnapi)
4143                         return -ENOMEM;
4144
4145                 bp->bnapi = bnapi;
4146                 bnapi += arr_size;
4147                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4148                         bp->bnapi[i] = bnapi;
4149                         bp->bnapi[i]->index = i;
4150                         bp->bnapi[i]->bp = bp;
4151                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4152                                 struct bnxt_cp_ring_info *cpr =
4153                                         &bp->bnapi[i]->cp_ring;
4154
4155                                 cpr->cp_ring_struct.ring_mem.flags =
4156                                         BNXT_RMEM_RING_PTE_FLAG;
4157                         }
4158                 }
4159
4160                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4161                                       sizeof(struct bnxt_rx_ring_info),
4162                                       GFP_KERNEL);
4163                 if (!bp->rx_ring)
4164                         return -ENOMEM;
4165
4166                 for (i = 0; i < bp->rx_nr_rings; i++) {
4167                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4168
4169                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4170                                 rxr->rx_ring_struct.ring_mem.flags =
4171                                         BNXT_RMEM_RING_PTE_FLAG;
4172                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4173                                         BNXT_RMEM_RING_PTE_FLAG;
4174                         }
4175                         rxr->bnapi = bp->bnapi[i];
4176                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4177                 }
4178
4179                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4180                                       sizeof(struct bnxt_tx_ring_info),
4181                                       GFP_KERNEL);
4182                 if (!bp->tx_ring)
4183                         return -ENOMEM;
4184
4185                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4186                                           GFP_KERNEL);
4187
4188                 if (!bp->tx_ring_map)
4189                         return -ENOMEM;
4190
4191                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4192                         j = 0;
4193                 else
4194                         j = bp->rx_nr_rings;
4195
4196                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4197                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4198
4199                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4200                                 txr->tx_ring_struct.ring_mem.flags =
4201                                         BNXT_RMEM_RING_PTE_FLAG;
4202                         txr->bnapi = bp->bnapi[j];
4203                         bp->bnapi[j]->tx_ring = txr;
4204                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4205                         if (i >= bp->tx_nr_rings_xdp) {
4206                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4207                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4208                         } else {
4209                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4210                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4211                         }
4212                 }
4213
4214                 rc = bnxt_alloc_stats(bp);
4215                 if (rc)
4216                         goto alloc_mem_err;
4217                 bnxt_init_stats(bp);
4218
4219                 rc = bnxt_alloc_ntp_fltrs(bp);
4220                 if (rc)
4221                         goto alloc_mem_err;
4222
4223                 rc = bnxt_alloc_vnics(bp);
4224                 if (rc)
4225                         goto alloc_mem_err;
4226         }
4227
4228         bnxt_init_ring_struct(bp);
4229
4230         rc = bnxt_alloc_rx_rings(bp);
4231         if (rc)
4232                 goto alloc_mem_err;
4233
4234         rc = bnxt_alloc_tx_rings(bp);
4235         if (rc)
4236                 goto alloc_mem_err;
4237
4238         rc = bnxt_alloc_cp_rings(bp);
4239         if (rc)
4240                 goto alloc_mem_err;
4241
4242         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4243                                   BNXT_VNIC_UCAST_FLAG;
4244         rc = bnxt_alloc_vnic_attributes(bp);
4245         if (rc)
4246                 goto alloc_mem_err;
4247         return 0;
4248
4249 alloc_mem_err:
4250         bnxt_free_mem(bp, true);
4251         return rc;
4252 }
4253
4254 static void bnxt_disable_int(struct bnxt *bp)
4255 {
4256         int i;
4257
4258         if (!bp->bnapi)
4259                 return;
4260
4261         for (i = 0; i < bp->cp_nr_rings; i++) {
4262                 struct bnxt_napi *bnapi = bp->bnapi[i];
4263                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4264                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4265
4266                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4267                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4268         }
4269 }
4270
4271 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4272 {
4273         struct bnxt_napi *bnapi = bp->bnapi[n];
4274         struct bnxt_cp_ring_info *cpr;
4275
4276         cpr = &bnapi->cp_ring;
4277         return cpr->cp_ring_struct.map_idx;
4278 }
4279
4280 static void bnxt_disable_int_sync(struct bnxt *bp)
4281 {
4282         int i;
4283
4284         atomic_inc(&bp->intr_sem);
4285
4286         bnxt_disable_int(bp);
4287         for (i = 0; i < bp->cp_nr_rings; i++) {
4288                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4289
4290                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4291         }
4292 }
4293
4294 static void bnxt_enable_int(struct bnxt *bp)
4295 {
4296         int i;
4297
4298         atomic_set(&bp->intr_sem, 0);
4299         for (i = 0; i < bp->cp_nr_rings; i++) {
4300                 struct bnxt_napi *bnapi = bp->bnapi[i];
4301                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4302
4303                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4304         }
4305 }
4306
4307 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4308                             u16 cmpl_ring, u16 target_id)
4309 {
4310         struct input *req = request;
4311
4312         req->req_type = cpu_to_le16(req_type);
4313         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4314         req->target_id = cpu_to_le16(target_id);
4315         if (bnxt_kong_hwrm_message(bp, req))
4316                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4317         else
4318                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4319 }
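
/* Editor's usage sketch (mirrors callers elsewhere in this file, not a
 * new API): a typical single-shot HWRM request is built and sent as
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 */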
4320
4321 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4322 {
4323         switch (hwrm_err) {
4324         case HWRM_ERR_CODE_SUCCESS:
4325                 return 0;
4326         case HWRM_ERR_CODE_RESOURCE_LOCKED:
4327                 return -EROFS;
4328         case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4329                 return -EACCES;
4330         case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4331                 return -ENOSPC;
4332         case HWRM_ERR_CODE_INVALID_PARAMS:
4333         case HWRM_ERR_CODE_INVALID_FLAGS:
4334         case HWRM_ERR_CODE_INVALID_ENABLES:
4335         case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4336         case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4337                 return -EINVAL;
4338         case HWRM_ERR_CODE_NO_BUFFER:
4339                 return -ENOMEM;
4340         case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4341         case HWRM_ERR_CODE_BUSY:
4342                 return -EAGAIN;
4343         case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4344                 return -EOPNOTSUPP;
4345         default:
4346                 return -EIO;
4347         }
4348 }
4349
4350 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4351                                  int timeout, bool silent)
4352 {
4353         int i, intr_process, rc, tmo_count;
4354         struct input *req = msg;
4355         u32 *data = msg;
4356         u8 *valid;
4357         u16 cp_ring_id, len = 0;
4358         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4359         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4360         struct hwrm_short_input short_input = {0};
4361         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4362         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4363         u16 dst = BNXT_HWRM_CHNL_CHIMP;
4364
4365         if (BNXT_NO_FW_ACCESS(bp))
4366                 return -EBUSY;
4367
4368         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4369                 if (msg_len > bp->hwrm_max_ext_req_len ||
4370                     !bp->hwrm_short_cmd_req_addr)
4371                         return -EINVAL;
4372         }
4373
4374         if (bnxt_hwrm_kong_chnl(bp, req)) {
4375                 dst = BNXT_HWRM_CHNL_KONG;
4376                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4377                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4378                 resp = bp->hwrm_cmd_kong_resp_addr;
4379         }
4380
4381         memset(resp, 0, PAGE_SIZE);
4382         cp_ring_id = le16_to_cpu(req->cmpl_ring);
4383         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4384
4385         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4386         /* currently supports only one outstanding message */
4387         if (intr_process)
4388                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4389
4390         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4391             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4392                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4393                 u16 max_msg_len;
4394
		/* Set the boundary for the maximum extended request length
		 * for the short cmd format.  If passed up from the device,
		 * use the max supported internal req length.
		 */
4399                 max_msg_len = bp->hwrm_max_ext_req_len;
4400
4401                 memcpy(short_cmd_req, req, msg_len);
4402                 if (msg_len < max_msg_len)
4403                         memset(short_cmd_req + msg_len, 0,
4404                                max_msg_len - msg_len);
4405
4406                 short_input.req_type = req->req_type;
4407                 short_input.signature =
4408                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4409                 short_input.size = cpu_to_le16(msg_len);
4410                 short_input.req_addr =
4411                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4412
4413                 data = (u32 *)&short_input;
4414                 msg_len = sizeof(short_input);
4415
4416                 /* Sync memory write before updating doorbell */
4417                 wmb();
4418
4419                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4420         }
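	/* Editor's note: past this point a short-format command writes only
	 * the small hwrm_short_input descriptor built above (request type,
	 * signature, size and the 64-bit DMA address of the real request)
	 * to the BAR; firmware then fetches the full request from the DMA
	 * buffer instead of from the BAR.
	 */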
4421
4422         /* Write request msg to hwrm channel */
4423         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4424
4425         for (i = msg_len; i < max_req_len; i += 4)
4426                 writel(0, bp->bar0 + bar_offset + i);
4427
4428         /* Ring channel doorbell */
4429         writel(1, bp->bar0 + doorbell_offset);
4430
4431         if (!pci_is_enabled(bp->pdev))
4432                 return 0;
4433
4434         if (!timeout)
4435                 timeout = DFLT_HWRM_CMD_TIMEOUT;
4436         /* convert timeout to usec */
4437         timeout *= 1000;
4438
4439         i = 0;
4440         /* Short timeout for the first few iterations:
4441          * number of loops = number of loops for short timeout +
4442          * number of loops for standard timeout.
4443          */
4444         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4445         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4446         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
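	/* Editor's worked example (assuming the bnxt.h defaults of
	 * HWRM_SHORT_TIMEOUT_COUNTER == 5, HWRM_SHORT_MIN_TIMEOUT == 3 usec
	 * and HWRM_MIN_TIMEOUT == 25 usec): a 500 msec timeout becomes
	 * 500000 usec, 5 * 3 = 15 usec of which are short polls, so
	 * tmo_count = 5 + DIV_ROUND_UP(499985, 25) = 20005 iterations.
	 */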
4447
4448         if (intr_process) {
4449                 u16 seq_id = bp->hwrm_intr_seq_id;
4450
4451                 /* Wait until hwrm response cmpl interrupt is processed */
4452                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4453                        i++ < tmo_count) {
4454                         /* Abort the wait for completion if the FW health
4455                          * check has failed.
4456                          */
4457                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4458                                 return -EBUSY;
4459                         /* on first few passes, just barely sleep */
4460                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4461                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4462                                              HWRM_SHORT_MAX_TIMEOUT);
4463                         else
4464                                 usleep_range(HWRM_MIN_TIMEOUT,
4465                                              HWRM_MAX_TIMEOUT);
4466                 }
4467
4468                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4469                         if (!silent)
4470                                 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4471                                            le16_to_cpu(req->req_type));
4472                         return -EBUSY;
4473                 }
4474                 len = le16_to_cpu(resp->resp_len);
4475                 valid = ((u8 *)resp) + len - 1;
4476         } else {
4477                 int j;
4478
4479                 /* Check if response len is updated */
4480                 for (i = 0; i < tmo_count; i++) {
4481                         /* Abort the wait for completion if the FW health
4482                          * check has failed.
4483                          */
4484                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4485                                 return -EBUSY;
4486                         len = le16_to_cpu(resp->resp_len);
4487                         if (len)
4488                                 break;
4489                         /* on first few passes, just barely sleep */
4490                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4491                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4492                                              HWRM_SHORT_MAX_TIMEOUT);
4493                         else
4494                                 usleep_range(HWRM_MIN_TIMEOUT,
4495                                              HWRM_MAX_TIMEOUT);
4496                 }
4497
4498                 if (i >= tmo_count) {
4499                         if (!silent)
4500                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4501                                            HWRM_TOTAL_TIMEOUT(i),
4502                                            le16_to_cpu(req->req_type),
4503                                            le16_to_cpu(req->seq_id), len);
4504                         return -EBUSY;
4505                 }
4506
4507                 /* Last byte of resp contains valid bit */
4508                 valid = ((u8 *)resp) + len - 1;
4509                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4510                         /* make sure we read from updated DMA memory */
4511                         dma_rmb();
4512                         if (*valid)
4513                                 break;
4514                         usleep_range(1, 5);
4515                 }
4516
4517                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4518                         if (!silent)
4519                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4520                                            HWRM_TOTAL_TIMEOUT(i),
4521                                            le16_to_cpu(req->req_type),
4522                                            le16_to_cpu(req->seq_id), len,
4523                                            *valid);
4524                         return -EBUSY;
4525                 }
4526         }
4527
	/* Zero the valid bit for compatibility.  The valid bit in an older
	 * spec may become a new field in a newer spec.  We must make sure
	 * that a new field not implemented by the old spec reads as zero.
	 */
4532         *valid = 0;
4533         rc = le16_to_cpu(resp->error_code);
4534         if (rc && !silent)
4535                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4536                            le16_to_cpu(resp->req_type),
4537                            le16_to_cpu(resp->seq_id), rc);
4538         return bnxt_hwrm_to_stderr(rc);
4539 }
4540
4541 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4542 {
4543         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4544 }
4545
4546 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4547                               int timeout)
4548 {
4549         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4550 }
4551
4552 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4553 {
4554         int rc;
4555
4556         mutex_lock(&bp->hwrm_cmd_lock);
4557         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4558         mutex_unlock(&bp->hwrm_cmd_lock);
4559         return rc;
4560 }
4561
4562 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4563                              int timeout)
4564 {
4565         int rc;
4566
4567         mutex_lock(&bp->hwrm_cmd_lock);
4568         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4569         mutex_unlock(&bp->hwrm_cmd_lock);
4570         return rc;
4571 }
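
/* Editor's usage sketch (pattern taken from callers in this file): the
 * locked wrappers above suffice when the response is not examined.  When
 * response fields must be read, hold hwrm_cmd_lock across the unlocked
 * variant so the shared response buffer is not reused underneath you:
 *
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	if (!rc)
 *		val = le32_to_cpu(resp->some_field); // some_field is hypothetical
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 */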
4572
4573 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4574                             bool async_only)
4575 {
4576         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4577         struct hwrm_func_drv_rgtr_input req = {0};
4578         DECLARE_BITMAP(async_events_bmap, 256);
4579         u32 *events = (u32 *)async_events_bmap;
4580         u32 flags;
4581         int rc, i;
4582
4583         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4584
4585         req.enables =
4586                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4587                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
4588                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4589
4590         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4591         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4592         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4593                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4594         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4595                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4596                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4597         req.flags = cpu_to_le32(flags);
4598         req.ver_maj_8b = DRV_VER_MAJ;
4599         req.ver_min_8b = DRV_VER_MIN;
4600         req.ver_upd_8b = DRV_VER_UPD;
4601         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4602         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4603         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4604
4605         if (BNXT_PF(bp)) {
4606                 u32 data[8];
4607                 int i;
4608
4609                 memset(data, 0, sizeof(data));
4610                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4611                         u16 cmd = bnxt_vf_req_snif[i];
4612                         unsigned int bit, idx;
4613
4614                         idx = cmd / 32;
4615                         bit = cmd % 32;
4616                         data[idx] |= 1 << bit;
4617                 }
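		/* Editor's example of the bit math above: a hypothetical
		 * cmd value of 70 lands at idx = 70 / 32 = 2 and
		 * bit = 70 % 32 = 6, i.e. bit 6 of data[2].
		 */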
4618
4619                 for (i = 0; i < 8; i++)
4620                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4621
4622                 req.enables |=
4623                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4624         }
4625
4626         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4627                 req.flags |= cpu_to_le32(
4628                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4629
4630         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4631         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4632                 u16 event_id = bnxt_async_events_arr[i];
4633
4634                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4635                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4636                         continue;
		__set_bit(event_id, async_events_bmap);
4638         }
4639         if (bmap && bmap_size) {
4640                 for (i = 0; i < bmap_size; i++) {
4641                         if (test_bit(i, bmap))
4642                                 __set_bit(i, async_events_bmap);
4643                 }
4644         }
4645         for (i = 0; i < 8; i++)
4646                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4647
4648         if (async_only)
4649                 req.enables =
4650                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4651
4652         mutex_lock(&bp->hwrm_cmd_lock);
4653         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4654         if (!rc) {
4655                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4656                 if (resp->flags &
4657                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4658                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4659         }
4660         mutex_unlock(&bp->hwrm_cmd_lock);
4661         return rc;
4662 }
4663
4664 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4665 {
4666         struct hwrm_func_drv_unrgtr_input req = {0};
4667
4668         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4669                 return 0;
4670
4671         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4672         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4673 }
4674
4675 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4676 {
	int rc = 0;
4678         struct hwrm_tunnel_dst_port_free_input req = {0};
4679
4680         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4681         req.tunnel_type = tunnel_type;
4682
4683         switch (tunnel_type) {
4684         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4685                 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4686                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4687                 break;
4688         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4689                 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4690                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4691                 break;
4692         default:
4693                 break;
4694         }
4695
4696         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4697         if (rc)
4698                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4699                            rc);
4700         return rc;
4701 }
4702
4703 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4704                                            u8 tunnel_type)
4705 {
	int rc = 0;
4707         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4708         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4709
4710         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4711
4712         req.tunnel_type = tunnel_type;
4713         req.tunnel_dst_port_val = port;
4714
4715         mutex_lock(&bp->hwrm_cmd_lock);
4716         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4717         if (rc) {
4718                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4719                            rc);
4720                 goto err_out;
4721         }
4722
4723         switch (tunnel_type) {
4724         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4725                 bp->vxlan_fw_dst_port_id =
4726                         le16_to_cpu(resp->tunnel_dst_port_id);
4727                 break;
4728         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4729                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4730                 break;
4731         default:
4732                 break;
4733         }
4734
4735 err_out:
4736         mutex_unlock(&bp->hwrm_cmd_lock);
4737         return rc;
4738 }
4739
4740 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4741 {
4742         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4743         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4744
4745         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4746         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4747
4748         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4749         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4750         req.mask = cpu_to_le32(vnic->rx_mask);
4751         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4752 }
4753
4754 #ifdef CONFIG_RFS_ACCEL
4755 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4756                                             struct bnxt_ntuple_filter *fltr)
4757 {
4758         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4759
4760         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4761         req.ntuple_filter_id = fltr->filter_id;
4762         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4763 }
4764
4765 #define BNXT_NTP_FLTR_FLAGS                                     \
4766         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4767          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4768          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4769          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4770          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4771          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4772          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4773          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4774          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4775          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4776          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4777          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4778          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4779          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4780
4781 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4782                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4783
4784 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4785                                              struct bnxt_ntuple_filter *fltr)
4786 {
4787         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4788         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4789         struct flow_keys *keys = &fltr->fkeys;
4790         struct bnxt_vnic_info *vnic;
4791         u32 flags = 0;
4792         int rc = 0;
4793
4794         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4795         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4796
4797         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4798                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4799                 req.dst_id = cpu_to_le16(fltr->rxq);
4800         } else {
4801                 vnic = &bp->vnic_info[fltr->rxq + 1];
4802                 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4803         }
4804         req.flags = cpu_to_le32(flags);
4805         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4806
4807         req.ethertype = htons(ETH_P_IP);
4808         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4809         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4810         req.ip_protocol = keys->basic.ip_proto;
4811
4812         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4813                 int i;
4814
4815                 req.ethertype = htons(ETH_P_IPV6);
4816                 req.ip_addr_type =
4817                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4818                 *(struct in6_addr *)&req.src_ipaddr[0] =
4819                         keys->addrs.v6addrs.src;
4820                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4821                         keys->addrs.v6addrs.dst;
4822                 for (i = 0; i < 4; i++) {
4823                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4824                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4825                 }
4826         } else {
4827                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4828                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4829                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4830                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4831         }
4832         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4833                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4834                 req.tunnel_type =
4835                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4836         }
4837
4838         req.src_port = keys->ports.src;
4839         req.src_port_mask = cpu_to_be16(0xffff);
4840         req.dst_port = keys->ports.dst;
4841         req.dst_port_mask = cpu_to_be16(0xffff);
4842
4843         mutex_lock(&bp->hwrm_cmd_lock);
4844         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4845         if (!rc) {
4846                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4847                 fltr->filter_id = resp->ntuple_filter_id;
4848         }
4849         mutex_unlock(&bp->hwrm_cmd_lock);
4850         return rc;
4851 }
4852 #endif
4853
4854 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4855                                      u8 *mac_addr)
4856 {
	int rc = 0;
4858         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4859         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4860
4861         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4862         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4863         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4864                 req.flags |=
4865                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4866         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4867         req.enables =
4868                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4869                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4870                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4871         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	eth_broadcast_addr(req.l2_addr_mask);
4878
4879         mutex_lock(&bp->hwrm_cmd_lock);
4880         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4881         if (!rc)
4882                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4883                                                         resp->l2_filter_id;
4884         mutex_unlock(&bp->hwrm_cmd_lock);
4885         return rc;
4886 }
4887
4888 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4889 {
4890         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4891         int rc = 0;
4892
4893         /* Any associated ntuple filters will also be cleared by firmware. */
4894         mutex_lock(&bp->hwrm_cmd_lock);
4895         for (i = 0; i < num_of_vnics; i++) {
4896                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4897
4898                 for (j = 0; j < vnic->uc_filter_count; j++) {
4899                         struct hwrm_cfa_l2_filter_free_input req = {0};
4900
4901                         bnxt_hwrm_cmd_hdr_init(bp, &req,
4902                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
4903
4904                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
4905
4906                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4907                                                 HWRM_CMD_TIMEOUT);
4908                 }
4909                 vnic->uc_filter_count = 0;
4910         }
4911         mutex_unlock(&bp->hwrm_cmd_lock);
4912
4913         return rc;
4914 }
4915
4916 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4917 {
4918         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4919         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4920         struct hwrm_vnic_tpa_cfg_input req = {0};
4921
4922         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4923                 return 0;
4924
4925         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4926
4927         if (tpa_flags) {
4928                 u16 mss = bp->dev->mtu - 40;
4929                 u32 nsegs, n, segs = 0, flags;
4930
4931                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4932                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4933                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4934                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4935                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4936                 if (tpa_flags & BNXT_FLAG_GRO)
4937                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4938
4939                 req.flags = cpu_to_le32(flags);
4940
4941                 req.enables =
4942                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4943                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4944                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4945
		/* The number of segs is in log2 units, and the first packet
		 * is not counted as part of these units.
		 */
4949                 if (mss <= BNXT_RX_PAGE_SIZE) {
4950                         n = BNXT_RX_PAGE_SIZE / mss;
4951                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4952                 } else {
4953                         n = mss / BNXT_RX_PAGE_SIZE;
4954                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4955                                 n++;
4956                         nsegs = (MAX_SKB_FRAGS - n) / n;
4957                 }
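		/* Editor's worked example (assuming 4K pages, so
		 * BNXT_RX_PAGE_SIZE == 4096, and MAX_SKB_FRAGS == 17): for a
		 * 1500-byte MTU, mss = 1460 <= 4096, so n = 4096 / 1460 = 2,
		 * nsegs = (17 - 1) * 2 = 32 and, on non-P5 chips below,
		 * segs = ilog2(32) = 5.
		 */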
4958
4959                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4960                         segs = MAX_TPA_SEGS_P5;
4961                         max_aggs = bp->max_tpa;
4962                 } else {
4963                         segs = ilog2(nsegs);
4964                 }
4965                 req.max_agg_segs = cpu_to_le16(segs);
4966                 req.max_aggs = cpu_to_le16(max_aggs);
4967
4968                 req.min_agg_len = cpu_to_le32(512);
4969         }
4970         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4971
4972         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4973 }
4974
4975 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4976 {
4977         struct bnxt_ring_grp_info *grp_info;
4978
4979         grp_info = &bp->grp_info[ring->grp_idx];
4980         return grp_info->cp_fw_ring_id;
4981 }
4982
4983 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4984 {
4985         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4986                 struct bnxt_napi *bnapi = rxr->bnapi;
4987                 struct bnxt_cp_ring_info *cpr;
4988
4989                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4990                 return cpr->cp_ring_struct.fw_ring_id;
4991         } else {
4992                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4993         }
4994 }
4995
4996 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4997 {
4998         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4999                 struct bnxt_napi *bnapi = txr->bnapi;
5000                 struct bnxt_cp_ring_info *cpr;
5001
5002                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5003                 return cpr->cp_ring_struct.fw_ring_id;
5004         } else {
5005                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5006         }
5007 }
5008
5009 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5010 {
5011         int entries;
5012
5013         if (bp->flags & BNXT_FLAG_CHIP_P5)
5014                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5015         else
5016                 entries = HW_HASH_INDEX_SIZE;
5017
5018         bp->rss_indir_tbl_entries = entries;
5019         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5020                                           GFP_KERNEL);
5021         if (!bp->rss_indir_tbl)
5022                 return -ENOMEM;
5023         return 0;
5024 }
5025
5026 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5027 {
5028         u16 max_rings, max_entries, pad, i;
5029
5030         if (!bp->rx_nr_rings)
5031                 return;
5032
5033         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5034                 max_rings = bp->rx_nr_rings - 1;
5035         else
5036                 max_rings = bp->rx_nr_rings;
5037
5038         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5039
5040         for (i = 0; i < max_entries; i++)
5041                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5042
5043         pad = bp->rss_indir_tbl_entries - max_entries;
5044         if (pad)
5045                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5046 }
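
/* Editor's note: ethtool_rxfh_indir_default(i, n) is simply i % n, so with
 * e.g. four RX rings the default table above reads 0, 1, 2, 3, 0, 1, ...
 */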
5047
5048 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5049 {
5050         u16 i, tbl_size, max_ring = 0;
5051
5052         if (!bp->rss_indir_tbl)
5053                 return 0;
5054
5055         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5056         for (i = 0; i < tbl_size; i++)
5057                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5058         return max_ring;
5059 }
5060
5061 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5062 {
5063         if (bp->flags & BNXT_FLAG_CHIP_P5)
5064                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5065         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5066                 return 2;
5067         return 1;
5068 }
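
/* Editor's example (assuming BNXT_RSS_TABLE_ENTRIES_P5 == 64): a P5 chip
 * with 65 RX rings needs DIV_ROUND_UP(65, 64) = 2 RSS contexts, while any
 * non-P5, non-Nitro chip always uses exactly one.
 */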
5069
5070 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5071 {
5072         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5073         u16 i, j;
5074
5075         /* Fill the RSS indirection table with ring group ids */
5076         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5077                 if (!no_rss)
5078                         j = bp->rss_indir_tbl[i];
5079                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5080         }
5081 }
5082
5083 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5084                                       struct bnxt_vnic_info *vnic)
5085 {
5086         __le16 *ring_tbl = vnic->rss_table;
5087         struct bnxt_rx_ring_info *rxr;
5088         u16 tbl_size, i;
5089
5090         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5091
5092         for (i = 0; i < tbl_size; i++) {
5093                 u16 ring_id, j;
5094
5095                 j = bp->rss_indir_tbl[i];
5096                 rxr = &bp->rx_ring[j];
5097
5098                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5099                 *ring_tbl++ = cpu_to_le16(ring_id);
5100                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5101                 *ring_tbl++ = cpu_to_le16(ring_id);
5102         }
5103 }
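
/* Editor's note: on P5 each indirection slot above expands to a pair of
 * IDs (the RX ring, then its completion ring), so the table written to
 * vnic->rss_table is twice the logical indirection table length.
 */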
5104
5105 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5106 {
5107         if (bp->flags & BNXT_FLAG_CHIP_P5)
5108                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5109         else
5110                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5111 }
5112
5113 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5114 {
5115         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5116         struct hwrm_vnic_rss_cfg_input req = {0};
5117
5118         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5119             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5120                 return 0;
5121
5122         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5123         if (set_rss) {
5124                 bnxt_fill_hw_rss_tbl(bp, vnic);
5125                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5126                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5127                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5128                 req.hash_key_tbl_addr =
5129                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5130         }
5131         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5132         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5133 }
5134
5135 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5136 {
5137         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5138         struct hwrm_vnic_rss_cfg_input req = {0};
5139         dma_addr_t ring_tbl_map;
5140         u32 i, nr_ctxs;
5141
5142         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5143         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5144         if (!set_rss) {
5145                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5146                 return 0;
5147         }
5148         bnxt_fill_hw_rss_tbl(bp, vnic);
5149         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5150         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5151         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5152         ring_tbl_map = vnic->rss_table_dma_addr;
5153         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5154         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5155                 int rc;
5156
5157                 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5158                 req.ring_table_pair_index = i;
5159                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5160                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5161                 if (rc)
5162                         return rc;
5163         }
5164         return 0;
5165 }
5166
5167 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5168 {
5169         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5170         struct hwrm_vnic_plcmodes_cfg_input req = {0};
5171
5172         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5173         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5174                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5175                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5176         req.enables =
5177                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5178                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5179         /* thresholds not implemented in firmware yet */
5180         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5181         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5182         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5183         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5184 }
5185
5186 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5187                                         u16 ctx_idx)
5188 {
5189         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5190
5191         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5192         req.rss_cos_lb_ctx_id =
5193                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5194
5195         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5196         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5197 }
5198
5199 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5200 {
5201         int i, j;
5202
5203         for (i = 0; i < bp->nr_vnics; i++) {
5204                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5205
5206                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5207                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5208                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5209                 }
5210         }
5211         bp->rsscos_nr_ctxs = 0;
5212 }
5213
5214 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5215 {
5216         int rc;
5217         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5218         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5219                                                 bp->hwrm_cmd_resp_addr;
5220
5221         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5222                                -1);
5223
5224         mutex_lock(&bp->hwrm_cmd_lock);
5225         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5226         if (!rc)
5227                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5228                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5229         mutex_unlock(&bp->hwrm_cmd_lock);
5230
5231         return rc;
5232 }
5233
5234 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5235 {
5236         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5237                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5238         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5239 }
5240
5241 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5242 {
5243         unsigned int ring = 0, grp_idx;
5244         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5245         struct hwrm_vnic_cfg_input req = {0};
5246         u16 def_vlan = 0;
5247
5248         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5249
5250         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5251                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5252
5253                 req.default_rx_ring_id =
5254                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5255                 req.default_cmpl_ring_id =
5256                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5257                 req.enables =
5258                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5259                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5260                 goto vnic_mru;
5261         }
5262         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS is supported for now; TBD: COS & LB */
5264         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5265                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5266                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5267                                            VNIC_CFG_REQ_ENABLES_MRU);
5268         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5269                 req.rss_rule =
5270                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5271                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5272                                            VNIC_CFG_REQ_ENABLES_MRU);
5273                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5274         } else {
5275                 req.rss_rule = cpu_to_le16(0xffff);
5276         }
5277
5278         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5279             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5280                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5281                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5282         } else {
5283                 req.cos_rule = cpu_to_le16(0xffff);
5284         }
5285
5286         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5287                 ring = 0;
5288         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5289                 ring = vnic_id - 1;
5290         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5291                 ring = bp->rx_nr_rings - 1;
5292
5293         grp_idx = bp->rx_ring[ring].bnapi->index;
5294         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5295         req.lb_rule = cpu_to_le16(0xffff);
5296 vnic_mru:
5297         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
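	/* Editor's example: with the standard 1500-byte MTU the MRU becomes
	 * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) = 1518 bytes.
	 */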
5298
5299         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5300 #ifdef CONFIG_BNXT_SRIOV
5301         if (BNXT_VF(bp))
5302                 def_vlan = bp->vf.vlan;
5303 #endif
5304         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5305                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5306         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5307                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5308
5309         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5310 }
5311
5312 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5313 {
5314         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5315                 struct hwrm_vnic_free_input req = {0};
5316
5317                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5318                 req.vnic_id =
5319                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5320
5321                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5322                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5323         }
5324 }
5325
5326 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5327 {
5328         u16 i;
5329
5330         for (i = 0; i < bp->nr_vnics; i++)
5331                 bnxt_hwrm_vnic_free_one(bp, i);
5332 }
5333
5334 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5335                                 unsigned int start_rx_ring_idx,
5336                                 unsigned int nr_rings)
5337 {
5338         int rc = 0;
5339         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5340         struct hwrm_vnic_alloc_input req = {0};
5341         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5342         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5343
5344         if (bp->flags & BNXT_FLAG_CHIP_P5)
5345                 goto vnic_no_ring_grps;
5346
5347         /* map ring groups to this vnic */
5348         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5349                 grp_idx = bp->rx_ring[i].bnapi->index;
5350                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5351                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5352                                    j, nr_rings);
5353                         break;
5354                 }
5355                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5356         }
5357
5358 vnic_no_ring_grps:
5359         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5360                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5361         if (vnic_id == 0)
5362                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5363
5364         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5365
5366         mutex_lock(&bp->hwrm_cmd_lock);
5367         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5368         if (!rc)
5369                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5370         mutex_unlock(&bp->hwrm_cmd_lock);
5371         return rc;
5372 }
5373
5374 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5375 {
5376         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5377         struct hwrm_vnic_qcaps_input req = {0};
5378         int rc;
5379
5380         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5381         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5382         if (bp->hwrm_spec_code < 0x10600)
5383                 return 0;
5384
5385         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5386         mutex_lock(&bp->hwrm_cmd_lock);
5387         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5388         if (!rc) {
5389                 u32 flags = le32_to_cpu(resp->flags);
5390
5391                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5392                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5393                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5394                 if (flags &
5395                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5396                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5397
5398                 /* Older P5 fw before EXT_HW_STATS support did not set
5399                  * VLAN_STRIP_CAP properly.
5400                  */
5401                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5402                     (BNXT_CHIP_P5_THOR(bp) &&
5403                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5404                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5405                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5406                 if (bp->max_tpa_v2) {
5407                         if (BNXT_CHIP_P5_THOR(bp))
5408                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5409                         else
5410                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5411                 }
5412         }
5413         mutex_unlock(&bp->hwrm_cmd_lock);
5414         return rc;
5415 }
5416
5417 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5418 {
5419         u16 i;
	int rc = 0;
5421
5422         if (bp->flags & BNXT_FLAG_CHIP_P5)
5423                 return 0;
5424
5425         mutex_lock(&bp->hwrm_cmd_lock);
5426         for (i = 0; i < bp->rx_nr_rings; i++) {
5427                 struct hwrm_ring_grp_alloc_input req = {0};
5428                 struct hwrm_ring_grp_alloc_output *resp =
5429                                         bp->hwrm_cmd_resp_addr;
5430                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5431
5432                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5433
5434                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5435                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5436                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5437                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5438
5439                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5440                                         HWRM_CMD_TIMEOUT);
5441                 if (rc)
5442                         break;
5443
5444                 bp->grp_info[grp_idx].fw_grp_id =
5445                         le32_to_cpu(resp->ring_group_id);
5446         }
5447         mutex_unlock(&bp->hwrm_cmd_lock);
5448         return rc;
5449 }
5450
5451 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5452 {
5453         u16 i;
5454         struct hwrm_ring_grp_free_input req = {0};
5455
5456         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5457                 return;
5458
5459         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5460
5461         mutex_lock(&bp->hwrm_cmd_lock);
5462         for (i = 0; i < bp->cp_nr_rings; i++) {
5463                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5464                         continue;
5465                 req.ring_group_id =
5466                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5467
5468                 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5469                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5470         }
5471         mutex_unlock(&bp->hwrm_cmd_lock);
5472 }
5473
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
	struct bnxt_ring_grp_info *grp_info;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	req.enables = 0;
	if (rmem->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX: {
		struct bnxt_tx_ring_info *txr;

		txr = container_of(ring, struct bnxt_tx_ring_info,
				   tx_ring_struct);
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		grp_info = &bp->grp_info[ring->grp_idx];
		req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	}
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			u16 flags = 0;

			/* Association of rx ring with stats context */
			grp_info = &bp->grp_info[ring->grp_idx];
			req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
			if (NET_IP_ALIGN == 2)
				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
			req.flags = cpu_to_le16(flags);
		}
		break;
	case HWRM_RING_ALLOC_AGG:
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
			/* Association of agg ring with rx ring */
			grp_info = &bp->grp_info[ring->grp_idx];
			req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
			req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		} else {
			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		}
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			/* Association of cp ring with nq */
			grp_info = &bp->grp_info[map_index];
			req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
			req.cq_handle = cpu_to_le64(ring->handle);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		}
		break;
	case HWRM_RING_ALLOC_NQ:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
			   ring_type, rc, err);
		return -EIO;
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

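/* Point firmware async event notifications at completion ring @idx.  The
 * PF uses HWRM_FUNC_CFG while a VF must use HWRM_FUNC_VF_CFG.
 */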
static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
	int rc;

	if (BNXT_PF(bp)) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
		req.fid = cpu_to_le16(0xffff);
		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
		req.enables =
			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	}
	return rc;
}

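/* Derive the doorbell address and key for a freshly allocated ring.  P5
 * chips use one 64-bit doorbell region at a fixed PF/VF offset with the
 * ring's xid encoded in the key; older chips give each ring its own
 * 0x80-byte doorbell slot indexed by @map_idx.
 */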
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
			u32 map_idx, u32 xid)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
		else
			db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key64 = DBR_PATH_L2;
			break;
		case HWRM_RING_ALLOC_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		db->db_key64 |= (u64)xid << DBR_XID_SFT;
	} else {
		db->doorbell = bp->bar1 + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
	}
}

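/* Allocate all firmware rings in dependency order: NQs (P5) or completion
 * rings first, then TX, RX, and finally RX aggregation rings.  The IRQ is
 * masked around each NQ/CMPL allocation because the vector can fire as
 * soon as firmware creates the ring, before its doorbell is initialized.
 * Ring 0 doubles as the async event completion ring.
 */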
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
	int i, rc = 0;
	u32 type;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = HWRM_RING_ALLOC_NQ;
	else
		type = HWRM_RING_ALLOC_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
		u32 map_idx = ring->map_idx;
		unsigned int vector;

		vector = bp->irq_tbl[map_idx].vector;
		disable_irq_nosync(vector);
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc) {
			enable_irq(vector);
			goto err_out;
		}
		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
		enable_irq(vector);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

		if (!i) {
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
			if (rc)
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
		}
	}

	type = HWRM_RING_ALLOC_TX;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u32 map_idx;

		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			struct bnxt_napi *bnapi = txr->bnapi;
			struct bnxt_cp_ring_info *cpr, *cpr2;
			u32 type2 = HWRM_RING_ALLOC_CMPL;

			cpr = &bnapi->cp_ring;
			cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_TX_HDL;
			map_idx = bnapi->index;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
		ring = &txr->tx_ring_struct;
		map_idx = i;
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
	}

	type = HWRM_RING_ALLOC_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		struct bnxt_napi *bnapi = rxr->bnapi;
		u32 map_idx = bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
		/* If we have agg rings, post agg buffers first; the RX
		 * doorbell is written after the agg ring is allocated below.
		 */
		if (!agg_rings)
			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
			u32 type2 = HWRM_RING_ALLOC_CMPL;
			struct bnxt_cp_ring_info *cpr2;

			cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_RX_HDL;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
	}

	if (agg_rings) {
		type = HWRM_RING_ALLOC_AGG;
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = ring->grp_idx;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
			if (rc)
				goto err_out;

			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
				    ring->fw_ring_id);
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}

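/* Free a single firmware ring.  @cmpl_ring_id is placed in the HWRM
 * request header; callers pass a real completion ring ID only on the
 * graceful close path and INVALID_HW_RING_ID otherwise.
 */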
static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	if (BNXT_NO_FW_ACCESS(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
			   ring_type, rc, error_code);
		return -EIO;
	}
	return 0;
}

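/* Free all firmware rings in roughly the reverse of the allocation order:
 * TX, RX, then RX agg rings, and the completion/NQ rings last.  Interrupts
 * are disabled before the completion rings go away since the IRQ doorbells
 * stop working at that point.
 */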
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	u32 type;
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);

			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);

			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
	else
		type = RING_FREE_REQ_RING_TYPE_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);

			hwrm_ring_free_send_msg(bp, ring, type,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	/* The completion rings are about to be freed.  After that the
	 * IRQ doorbells will no longer work, so disable interrupts
	 * first.
	 */
	bnxt_disable_int_sync(bp);

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = RING_FREE_REQ_RING_TYPE_NQ;
	else
		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring;
		int j;

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				if (ring->fw_ring_id == INVALID_HW_RING_ID)
					continue;
				hwrm_ring_free_send_msg(bp, ring,
					RING_FREE_REQ_RING_TYPE_L2_CMPL,
					INVALID_HW_RING_ID);
				ring->fw_ring_id = INVALID_HW_RING_ID;
			}
		}
		ring = &cpr->cp_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring, type,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}

static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared);

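/* Query the resources currently reserved for this function and cache them
 * in bp->hw_resc.  On P5 chips the completion ring count can limit the
 * usable RX/TX rings, so the cached values are trimmed accordingly.
 */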
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	if (BNXT_NEW_RM(bp)) {
		u16 cp, stats;

		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
		hw_resc->resv_hw_ring_grps =
			le32_to_cpu(resp->alloc_hw_ring_grps);
		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
		cp = le16_to_cpu(resp->alloc_cmpl_rings);
		stats = le16_to_cpu(resp->alloc_stat_ctx);
		hw_resc->resv_irqs = cp;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			int rx = hw_resc->resv_rx_rings;
			int tx = hw_resc->resv_tx_rings;

			if (bp->flags & BNXT_FLAG_AGG_RINGS)
				rx >>= 1;
			if (cp < (rx + tx)) {
				bnxt_trim_rings(bp, &rx, &tx, cp, false);
				if (bp->flags & BNXT_FLAG_AGG_RINGS)
					rx <<= 1;
				hw_resc->resv_rx_rings = rx;
				hw_resc->resv_tx_rings = tx;
			}
			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
			hw_resc->resv_hw_ring_grps = rx;
		}
		hw_resc->resv_cp_rings = cp;
		hw_resc->resv_stat_ctxs = stats;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(fid);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	return rc;
}

static bool bnxt_rfs_supported(struct bnxt *bp);

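/* Fill a HWRM_FUNC_CFG request with the PF ring counts to reserve; each
 * non-zero count sets its enables bit.  On P5 chips one completion ring is
 * needed per TX ring and per RX ring group (num_cmpl_rings = tx_rings +
 * ring_grps), and one RSS context appears to cover 64 ring groups, hence
 * the DIV_ROUND_UP(ring_grps, 64).
 */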
static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
			     int tx_rings, int rx_rings, int ring_grps,
			     int cp_rings, int stats, int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
	req->fid = cpu_to_le16(0xffff);
	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(tx_rings);
	if (BNXT_NEW_RM(bp)) {
		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
		enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
			enables |= tx_rings + ring_grps ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
			enables |= rx_rings ?
				FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
		} else {
			enables |= cp_rings ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
			enables |= ring_grps ?
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
		}
		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;

		req->num_rx_rings = cpu_to_le16(rx_rings);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
			req->num_msix = cpu_to_le16(cp_rings);
			req->num_rsscos_ctxs =
				cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
		} else {
			req->num_cmpl_rings = cpu_to_le16(cp_rings);
			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
			req->num_rsscos_ctxs = cpu_to_le16(1);
			if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
			    bnxt_rfs_supported(bp))
				req->num_rsscos_ctxs =
					cpu_to_le16(ring_grps + 1);
		}
		req->num_stat_ctxs = cpu_to_le16(stats);
		req->num_vnics = cpu_to_le16(vnics);
	}
	req->enables = cpu_to_le32(enables);
}

static void
__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
			     int rx_rings, int ring_grps, int cp_rings,
			     int stats, int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
	enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		enables |= tx_rings + ring_grps ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
	} else {
		enables |= cp_rings ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
		enables |= ring_grps ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
	}
	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;

	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->num_tx_rings = cpu_to_le16(tx_rings);
	req->num_rx_rings = cpu_to_le16(rx_rings);
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
	} else {
		req->num_cmpl_rings = cpu_to_le16(cp_rings);
		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
	}
	req->num_stat_ctxs = cpu_to_le16(stats);
	req->num_vnics = cpu_to_le16(vnics);

	req->enables = cpu_to_le32(enables);
}

static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int stats, int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, stats, vnics);
	if (!req.enables)
		return 0;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	if (bp->hwrm_spec_code < 0x10601)
		bp->hw_resc.resv_tx_rings = tx_rings;

	return bnxt_hwrm_get_rings(bp);
}

static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int stats, int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	if (!BNXT_NEW_RM(bp)) {
		bp->hw_resc.resv_tx_rings = tx_rings;
		return 0;
	}

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, stats, vnics);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	return bnxt_hwrm_get_rings(bp);
}

static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
				   int cp, int stat, int vnic)
{
	if (BNXT_PF(bp))
		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
						  vnic);
	else
		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
						  vnic);
}

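/* Number of notification queues in use, including any MSI-X vectors set
 * aside for the RDMA (ULP) driver.
 */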
int bnxt_nq_rings_in_use(struct bnxt *bp)
{
	int cp = bp->cp_nr_rings;
	int ulp_msix, ulp_base;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (ulp_msix) {
		ulp_base = bnxt_get_ulp_msix_base(bp);
		cp += ulp_msix;
		if ((ulp_base + ulp_msix) > cp)
			cp = ulp_base + ulp_msix;
	}
	return cp;
}

static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
	int cp;

	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		return bnxt_nq_rings_in_use(bp);

	cp = bp->tx_nr_rings + bp->rx_nr_rings;
	return cp;
}

static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
	int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
	int cp = bp->cp_nr_rings;

	if (!ulp_stat)
		return cp;

	if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
		return bnxt_get_ulp_msix_base(bp) + ulp_stat;

	return cp + ulp_stat;
}

/* Check if a default RSS map needs to be set up.  This function is only
 * used on older firmware that does not require reserving RX rings.
 */
static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	/* The RSS map is valid for RX rings set to resv_rx_rings */
	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
		hw_resc->resv_rx_rings = bp->rx_nr_rings;
		if (!netif_is_rxfh_configured(bp->dev))
			bnxt_set_dflt_rss_indir_tbl(bp);
	}
}

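/* Return true if the current ring configuration no longer matches what is
 * reserved in firmware, meaning __bnxt_reserve_rings() must run again.
 */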
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_cp_rings_in_use(bp);
	int nq = bnxt_nq_rings_in_use(bp);
	int rx = bp->rx_nr_rings, stat;
	int vnic = 1, grp = rx;

	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
	    bp->hwrm_spec_code >= 0x10601)
		return true;

	/* Old firmware does not need RX ring reservations but we still
	 * need to set up a default RSS map when needed.  With new firmware
	 * we go through RX ring reservations first and then set up the
	 * RSS map for the successfully reserved RX rings when needed.
	 */
	if (!BNXT_NEW_RM(bp)) {
		bnxt_check_rss_tbl_no_rmgr(bp);
		return false;
	}
	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	stat = bnxt_get_func_stat_ctxs(bp);
	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
	    (hw_resc->resv_hw_ring_grps != grp &&
	     !(bp->flags & BNXT_FLAG_CHIP_P5)))
		return true;
	if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
	    hw_resc->resv_irqs != nq)
		return true;
	return false;
}

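/* Reserve rings with the firmware, then shrink the driver's ring counts to
 * what was actually granted.  With aggregation rings each RX ring needs two
 * reserved entries; if even one pair cannot be had, aggregation (and thus
 * LRO) is turned off.  The RSS map is reset only when the granted RX count
 * invalidates a user-configured map.
 */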
static int __bnxt_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_nq_rings_in_use(bp);
	int tx = bp->tx_nr_rings;
	int rx = bp->rx_nr_rings;
	int grp, rx_rings, rc;
	int vnic = 1, stat;
	bool sh = false;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	grp = bp->rx_nr_rings;
	stat = bnxt_get_func_stat_ctxs(bp);

	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
	if (rc)
		return rc;

	tx = hw_resc->resv_tx_rings;
	if (BNXT_NEW_RM(bp)) {
		rx = hw_resc->resv_rx_rings;
		cp = hw_resc->resv_irqs;
		grp = hw_resc->resv_hw_ring_grps;
		vnic = hw_resc->resv_vnics;
		stat = hw_resc->resv_stat_ctxs;
	}

	rx_rings = rx;
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		if (rx >= 2) {
			rx_rings = rx >> 1;
		} else {
			if (netif_running(bp->dev))
				return -ENOMEM;

			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features &= ~NETIF_F_LRO;
			bp->dev->features &= ~NETIF_F_LRO;
			bnxt_set_ring_params(bp);
		}
	}
	rx_rings = min_t(int, rx_rings, grp);
	cp = min_t(int, cp, bp->cp_nr_rings);
	if (stat > bnxt_get_ulp_stat_ctxs(bp))
		stat -= bnxt_get_ulp_stat_ctxs(bp);
	cp = min_t(int, cp, stat);
	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx = rx_rings << 1;
	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
	bp->tx_nr_rings = tx;

	/* If we cannot reserve all the RX rings, reset the RSS map only
	 * if absolutely necessary.
	 */
	if (rx_rings != bp->rx_nr_rings) {
		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
			    rx_rings, bp->rx_nr_rings);
		if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
		}
	}
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = cp;

	if (!tx || !rx || !cp || !grp || !vnic || !stat)
		return -ENOMEM;

	if (!netif_is_rxfh_configured(bp->dev))
		bnxt_set_dflt_rss_indir_tbl(bp);

	return rc;
}

static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int stats,
				    int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	u32 flags;

	if (!BNXT_NEW_RM(bp))
		return 0;

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, stats, vnics);
	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;

	req.flags = cpu_to_le32(flags);
	return hwrm_send_message_silent(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int stats,
				    int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	u32 flags;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, stats, vnics);
	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
	if (BNXT_NEW_RM(bp)) {
		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
		else
			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
	}

	req.flags = cpu_to_le32(flags);
	return hwrm_send_message_silent(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
}

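/* Dry-run a ring reservation: the *_ASSETS_TEST flags make firmware check
 * whether the requested counts could be allocated without committing them.
 * Only supported on HWRM spec 0x10801 and later.
 */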
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				 int ring_grps, int cp_rings, int stats,
				 int vnics)
{
	if (bp->hwrm_spec_code < 0x10801)
		return 0;

	if (BNXT_PF(bp))
		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
						ring_grps, cp_rings, stats,
						vnics);

	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
					cp_rings, stats, vnics);
}

static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
{
	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	struct hwrm_ring_aggint_qcaps_input req = {0};
	int rc;

	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
	coal_cap->num_cmpl_dma_aggr_max = 63;
	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
	coal_cap->int_lat_tmr_min_max = 65535;
	coal_cap->int_lat_tmr_max_max = 65535;
	coal_cap->num_cmpl_aggr_int_max = 65535;
	coal_cap->timer_units = 80;

	if (bp->hwrm_spec_code < 0x10902)
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
		coal_cap->num_cmpl_dma_aggr_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
		coal_cap->num_cmpl_dma_aggr_during_int_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
		coal_cap->cmpl_aggr_dma_tmr_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
		coal_cap->int_lat_tmr_min_max =
			le16_to_cpu(resp->int_lat_tmr_min_max);
		coal_cap->int_lat_tmr_max_max =
			le16_to_cpu(resp->int_lat_tmr_max_max);
		coal_cap->num_cmpl_aggr_int_max =
			le16_to_cpu(resp->num_cmpl_aggr_int_max);
		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

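/* Convert microseconds to coalescing timer ticks.  timer_units is in
 * nanoseconds per tick (80 by default), so e.g. 25 usec works out to
 * 25000 / 80 = 312 ticks.
 */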
static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;

	return usec * 1000 / coal_cap->timer_units;
}

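/* Translate the driver's coalescing settings in @hw_coal into an aggint
 * params request, clamping every field to the limits reported by
 * HWRM_RING_AGGINT_QCAPS.  The min latency timer is set to half of the
 * interrupt timer and the DMA buffer timer to a quarter of it.
 */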
static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
	struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 cmpl_params = coal_cap->cmpl_params;
	u16 val, tmr, max, flags = 0;

	max = hw_coal->bufs_per_record * 128;
	if (hw_coal->budget)
		max = hw_coal->bufs_per_record * hw_coal->budget;
	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);

	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
	req->num_cmpl_aggr_int = cpu_to_le16(val);

	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
	req->num_cmpl_dma_aggr = cpu_to_le16(val);

	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
		      coal_cap->num_cmpl_dma_aggr_during_int_max);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
	req->int_lat_tmr_max = cpu_to_le16(tmr);

	/* min timer set to 1/2 of interrupt timer */
	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
		val = tmr / 2;
		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
		req->int_lat_tmr_min = cpu_to_le16(val);
		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	}

	/* buf timer set to 1/4 of interrupt timer */
	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);

	if (cmpl_params &
	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
		val = clamp_t(u16, tmr, 1,
			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}

/* Caller holds bp->hwrm_cmd_lock */
static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 nq_params = coal_cap->nq_params;
	u16 tmr;

	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
			       -1, -1);
	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
	req.flags =
		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
	req.int_lat_tmr_min = cpu_to_le16(tmr);
	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal coal;

	/* Tick values are in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));

	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;

	if (!bnapi->rx_ring)
		return -ENODEV;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);

	req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));

	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
				 HWRM_CMD_TIMEOUT);
}

int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_coal *hw_coal;
		u16 ring_id;

		req = &req_rx;
		if (!bnapi->rx_ring) {
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req = &req_tx;
		} else {
			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
		}
		req->ring_id = cpu_to_le16(ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (bnapi->rx_ring && bnapi->tx_ring) {
			req = &req_tx;
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req->ring_id = cpu_to_le16(ring_id);
			rc = _hwrm_send_message(bp, req, sizeof(*req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;
		}
		if (bnapi->rx_ring)
			hw_coal = &bp->rx_coal;
		else
			hw_coal = &bp->tx_coal;
		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

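/* Free all statistics contexts.  On firmware with major version <= 20 the
 * counters are explicitly cleared first, presumably because that firmware
 * does not zero a context when it is reused.
 */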
static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	struct hwrm_stat_ctx_clr_stats_input req0 = {0};
	struct hwrm_stat_ctx_free_input req = {0};
	int i;

	if (!bp->bnapi)
		return;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
			if (BNXT_FW_MAJ(bp) <= 20) {
				req0.stat_ctx_id = req.stat_ctx_id;
				_hwrm_send_message(bp, &req0, sizeof(req0),
						   HWRM_CMD_TIMEOUT);
			}
			_hwrm_send_message(bp, &req, sizeof(req),
					   HWRM_CMD_TIMEOUT);

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u32 min_db_offset = 0;
	u16 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	} else {
		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;
	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

	if (bp->db_size)
		goto func_qcfg_exit;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			min_db_offset = DB_PF_OFFSET_P5;
		else
			min_db_offset = DB_VF_OFFSET_P5;
	}
	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
				 1024);
	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
	    bp->db_size <= min_db_offset)
		bp->db_size = pci_resource_len(bp->pdev, 2);

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

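/* Query the entry sizes and limits of the context memory types (QP, SRQ,
 * CQ, VNIC, stats, TQM, MRAV, TIM) that the host must back for the
 * firmware, and allocate bp->ctx plus one TQM slot per fast-path ring
 * (plus one) to track them.
 */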
6743 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6744 {
6745         struct hwrm_func_backing_store_qcaps_input req = {0};
6746         struct hwrm_func_backing_store_qcaps_output *resp =
6747                 bp->hwrm_cmd_resp_addr;
6748         int rc;
6749
6750         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6751                 return 0;
6752
6753         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6754         mutex_lock(&bp->hwrm_cmd_lock);
6755         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6756         if (!rc) {
6757                 struct bnxt_ctx_pg_info *ctx_pg;
6758                 struct bnxt_ctx_mem_info *ctx;
6759                 int i, tqm_rings;
6760
6761                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6762                 if (!ctx) {
6763                         rc = -ENOMEM;
6764                         goto ctx_err;
6765                 }
6766                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6767                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6768                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6769                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6770                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6771                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6772                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6773                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6774                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6775                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6776                 ctx->vnic_max_vnic_entries =
6777                         le16_to_cpu(resp->vnic_max_vnic_entries);
6778                 ctx->vnic_max_ring_table_entries =
6779                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6780                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6781                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6782                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6783                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6784                 ctx->tqm_min_entries_per_ring =
6785                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6786                 ctx->tqm_max_entries_per_ring =
6787                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6788                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6789                 if (!ctx->tqm_entries_multiple)
6790                         ctx->tqm_entries_multiple = 1;
6791                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6792                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6793                 ctx->mrav_num_entries_units =
6794                         le16_to_cpu(resp->mrav_num_entries_units);
6795                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6796                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6797                 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
6798                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6799                 if (!ctx->tqm_fp_rings_count)
6800                         ctx->tqm_fp_rings_count = bp->max_q;
                     /* tqm_mem[] holds 1 slow-path + up to 8 fast-path rings,
                      * matching the 9-entry loop in
                      * bnxt_hwrm_func_backing_store_cfg(); don't trust a
                      * larger firmware-supplied count.
                      */
                     else if (ctx->tqm_fp_rings_count > 8)
                             ctx->tqm_fp_rings_count = 8;
6801
6802                 tqm_rings = ctx->tqm_fp_rings_count + 1;
6803                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6804                 if (!ctx_pg) {
6805                         kfree(ctx);
6806                         rc = -ENOMEM;
6807                         goto ctx_err;
6808                 }
6809                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6810                         ctx->tqm_mem[i] = ctx_pg;
6811                 bp->ctx = ctx;
6812         } else {
6813                 rc = 0;
6814         }
6815 ctx_err:
6816         mutex_unlock(&bp->hwrm_cmd_lock);
6817         return rc;
6818 }
6819
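     /* Encode a ring's page size and indirection level into the one-byte
      * HWRM "pg_size_lvl" field and point the firmware at its page
      * directory.  Assuming the usual HWRM encoding reflected below, the
      * upper nibble selects the page size (0 = 4K, 1 = 8K, 2 = 64K) and
      * the low bits the level (0 = direct, 1 = one level of page tables,
      * 2 = two levels); e.g. a two-level table of 4K pages encodes as
      * 0x02.
      */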
6820 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6821                                   __le64 *pg_dir)
6822 {
6823         u8 pg_size = 0;
6824
6825         if (BNXT_PAGE_SHIFT == 13)
6826                 pg_size = 1 << 4;
6827         else if (BNXT_PAGE_SIZE == 0x10000)
6828                 pg_size = 2 << 4;
6829
6830         *pg_attr = pg_size;
6831         if (rmem->depth >= 1) {
6832                 if (rmem->depth == 2)
6833                         *pg_attr |= 2;
6834                 else
6835                         *pg_attr |= 1;
6836                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6837         } else {
6838                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6839         }
6840 }
6841
6842 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6843         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6844          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6845          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6846          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6847          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6848
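     /* Program the firmware with the backing-store layout chosen by
      * bnxt_alloc_ctx_mem().  Only the context types named in @enables
      * are written.  The TQM loop steps three pointers (num_entries,
      * pg_attr, pg_dir) across the nine per-ring fields (one slow-path
      * ring plus up to eight fast-path rings), relying on each kind of
      * tqm_sp_*/tqm_ring0..7_* field being laid out contiguously in the
      * request.
      */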
6849 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6850 {
6851         struct hwrm_func_backing_store_cfg_input req = {0};
6852         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6853         struct bnxt_ctx_pg_info *ctx_pg;
6854         __le32 *num_entries;
6855         __le64 *pg_dir;
6856         u32 flags = 0;
6857         u8 *pg_attr;
6858         u32 ena;
6859         int i;
6860
6861         if (!ctx)
6862                 return 0;
6863
6864         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6865         req.enables = cpu_to_le32(enables);
6866
6867         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6868                 ctx_pg = &ctx->qp_mem;
6869                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6870                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6871                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6872                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6873                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6874                                       &req.qpc_pg_size_qpc_lvl,
6875                                       &req.qpc_page_dir);
6876         }
6877         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6878                 ctx_pg = &ctx->srq_mem;
6879                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6880                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6881                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6882                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6883                                       &req.srq_pg_size_srq_lvl,
6884                                       &req.srq_page_dir);
6885         }
6886         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6887                 ctx_pg = &ctx->cq_mem;
6888                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6889                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6890                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6891                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6892                                       &req.cq_page_dir);
6893         }
6894         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6895                 ctx_pg = &ctx->vnic_mem;
6896                 req.vnic_num_vnic_entries =
6897                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6898                 req.vnic_num_ring_table_entries =
6899                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6900                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6901                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6902                                       &req.vnic_pg_size_vnic_lvl,
6903                                       &req.vnic_page_dir);
6904         }
6905         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6906                 ctx_pg = &ctx->stat_mem;
6907                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6908                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6909                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6910                                       &req.stat_pg_size_stat_lvl,
6911                                       &req.stat_page_dir);
6912         }
6913         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6914                 ctx_pg = &ctx->mrav_mem;
6915                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6916                 if (ctx->mrav_num_entries_units)
6917                         flags |=
6918                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6919                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6920                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6921                                       &req.mrav_pg_size_mrav_lvl,
6922                                       &req.mrav_page_dir);
6923         }
6924         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6925                 ctx_pg = &ctx->tim_mem;
6926                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6927                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6928                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6929                                       &req.tim_pg_size_tim_lvl,
6930                                       &req.tim_page_dir);
6931         }
6932         for (i = 0, num_entries = &req.tqm_sp_num_entries,
6933              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6934              pg_dir = &req.tqm_sp_page_dir,
6935              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6936              i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6937                 if (!(enables & ena))
6938                         continue;
6939
6940                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6941                 ctx_pg = ctx->tqm_mem[i];
6942                 *num_entries = cpu_to_le32(ctx_pg->entries);
6943                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6944         }
6945         req.flags = cpu_to_le32(flags);
6946         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6947 }
6948
6949 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6950                                   struct bnxt_ctx_pg_info *ctx_pg)
6951 {
6952         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6953
6954         rmem->page_size = BNXT_PAGE_SIZE;
6955         rmem->pg_arr = ctx_pg->ctx_pg_arr;
6956         rmem->dma_arr = ctx_pg->ctx_dma_arr;
6957         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6958         if (rmem->depth >= 1)
6959                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6960         return bnxt_alloc_ring(bp, rmem);
6961 }
6962
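     /* Allocate host backing store for one context type.  @mem_size is
      * split into BNXT_PAGE_SIZE pages.  When more than MAX_CTX_PAGES
      * pages (or @depth > 1) are needed, a two-level layout is used: the
      * top-level block provides one page-table page per group of
      * MAX_CTX_PAGES data pages.  A rough worked example, assuming 4K
      * pages and MAX_CTX_PAGES = BNXT_PAGE_SIZE / 8 = 512 (one 8-byte PTE
      * per data page, per bnxt.h): a 16 MB request becomes 4096 data
      * pages -> 8 page-table pages, while anything over
      * MAX_CTX_TOTAL_PAGES = 512 * 512 pages (1 GB) fails with -EINVAL.
      * With @use_init_val set, pages are pre-filled with the
      * firmware-provided ctx_kind_initializer.
      */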
6963 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6964                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6965                                   u8 depth, bool use_init_val)
6966 {
6967         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6968         int rc;
6969
6970         if (!mem_size)
6971                 return -EINVAL;
6972
6973         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6974         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6975                 ctx_pg->nr_pages = 0;
6976                 return -EINVAL;
6977         }
6978         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6979                 int nr_tbls, i;
6980
6981                 rmem->depth = 2;
6982                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6983                                              GFP_KERNEL);
6984                 if (!ctx_pg->ctx_pg_tbl)
6985                         return -ENOMEM;
6986                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6987                 rmem->nr_pages = nr_tbls;
6988                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6989                 if (rc)
6990                         return rc;
6991                 for (i = 0; i < nr_tbls; i++) {
6992                         struct bnxt_ctx_pg_info *pg_tbl;
6993
6994                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6995                         if (!pg_tbl)
6996                                 return -ENOMEM;
6997                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6998                         rmem = &pg_tbl->ring_mem;
6999                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7000                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7001                         rmem->depth = 1;
7002                         rmem->nr_pages = MAX_CTX_PAGES;
7003                         if (use_init_val)
7004                                 rmem->init_val = bp->ctx->ctx_kind_initializer;
7005                         if (i == (nr_tbls - 1)) {
7006                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7007
7008                                 if (rem)
7009                                         rmem->nr_pages = rem;
7010                         }
7011                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7012                         if (rc)
7013                                 break;
7014                 }
7015         } else {
7016                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7017                 if (rmem->nr_pages > 1 || depth)
7018                         rmem->depth = 1;
7019                 if (use_init_val)
7020                         rmem->init_val = bp->ctx->ctx_kind_initializer;
7021                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7022         }
7023         return rc;
7024 }
7025
7026 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7027                                   struct bnxt_ctx_pg_info *ctx_pg)
7028 {
7029         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7030
7031         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7032             ctx_pg->ctx_pg_tbl) {
7033                 int i, nr_tbls = rmem->nr_pages;
7034
7035                 for (i = 0; i < nr_tbls; i++) {
7036                         struct bnxt_ctx_pg_info *pg_tbl;
7037                         struct bnxt_ring_mem_info *rmem2;
7038
7039                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
7040                         if (!pg_tbl)
7041                                 continue;
7042                         rmem2 = &pg_tbl->ring_mem;
7043                         bnxt_free_ring(bp, rmem2);
7044                         ctx_pg->ctx_pg_arr[i] = NULL;
7045                         kfree(pg_tbl);
7046                         ctx_pg->ctx_pg_tbl[i] = NULL;
7047                 }
7048                 kfree(ctx_pg->ctx_pg_tbl);
7049                 ctx_pg->ctx_pg_tbl = NULL;
7050         }
7051         bnxt_free_ring(bp, rmem);
7052         ctx_pg->nr_pages = 0;
7053 }
7054
7055 static void bnxt_free_ctx_mem(struct bnxt *bp)
7056 {
7057         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7058         int i;
7059
7060         if (!ctx)
7061                 return;
7062
7063         if (ctx->tqm_mem[0]) {
7064                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7065                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7066                 kfree(ctx->tqm_mem[0]);
7067                 ctx->tqm_mem[0] = NULL;
7068         }
7069
7070         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7071         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7072         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7073         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7074         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7075         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7076         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7077         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7078 }
7079
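     /* Size, allocate, and register all context backing store with the
      * firmware.  If RoCE is supported (and this is not a kdump kernel),
      * the QP/SRQ/CQ pools are padded (65536 extra QPs, 8192 extra SRQs)
      * and MR/AV and TIM memory is added so RDMA can attach later without
      * reallocation.  TQM ring sizes are derived from the L2 and QP1
      * entry counts, rounded to tqm_entries_multiple and clamped to the
      * firmware's min/max per ring.
      */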
7080 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7081 {
7082         struct bnxt_ctx_pg_info *ctx_pg;
7083         struct bnxt_ctx_mem_info *ctx;
7084         u32 mem_size, ena, entries;
7085         u32 entries_sp, min;
7086         u32 num_mr, num_ah;
7087         u32 extra_srqs = 0;
7088         u32 extra_qps = 0;
7089         u8 pg_lvl = 1;
7090         int i, rc;
7091
7092         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7093         if (rc) {
7094                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7095                            rc);
7096                 return rc;
7097         }
7098         ctx = bp->ctx;
7099         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7100                 return 0;
7101
7102         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7103                 pg_lvl = 2;
7104                 extra_qps = 65536;
7105                 extra_srqs = 8192;
7106         }
7107
7108         ctx_pg = &ctx->qp_mem;
7109         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7110                           extra_qps;
7111         mem_size = ctx->qp_entry_size * ctx_pg->entries;
7112         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7113         if (rc)
7114                 return rc;
7115
7116         ctx_pg = &ctx->srq_mem;
7117         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7118         mem_size = ctx->srq_entry_size * ctx_pg->entries;
7119         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7120         if (rc)
7121                 return rc;
7122
7123         ctx_pg = &ctx->cq_mem;
7124         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7125         mem_size = ctx->cq_entry_size * ctx_pg->entries;
7126         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7127         if (rc)
7128                 return rc;
7129
7130         ctx_pg = &ctx->vnic_mem;
7131         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7132                           ctx->vnic_max_ring_table_entries;
7133         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7134         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7135         if (rc)
7136                 return rc;
7137
7138         ctx_pg = &ctx->stat_mem;
7139         ctx_pg->entries = ctx->stat_max_entries;
7140         mem_size = ctx->stat_entry_size * ctx_pg->entries;
7141         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7142         if (rc)
7143                 return rc;
7144
7145         ena = 0;
7146         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7147                 goto skip_rdma;
7148
7149         ctx_pg = &ctx->mrav_mem;
7150         /* 128K extra is needed to accommodate static AH context
7151          * allocation by f/w.
7152          */
7153         num_mr = 1024 * 256;
7154         num_ah = 1024 * 128;
7155         ctx_pg->entries = num_mr + num_ah;
7156         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7157         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
7158         if (rc)
7159                 return rc;
7160         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7161         if (ctx->mrav_num_entries_units)
7162                 ctx_pg->entries =
7163                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7164                          (num_ah / ctx->mrav_num_entries_units);
7165
7166         ctx_pg = &ctx->tim_mem;
7167         ctx_pg->entries = ctx->qp_mem.entries;
7168         mem_size = ctx->tim_entry_size * ctx_pg->entries;
7169         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7170         if (rc)
7171                 return rc;
7172         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7173
7174 skip_rdma:
7175         min = ctx->tqm_min_entries_per_ring;
7176         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7177                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7178         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7179         entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
7180         entries = roundup(entries, ctx->tqm_entries_multiple);
7181         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7182         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7183                 ctx_pg = ctx->tqm_mem[i];
7184                 ctx_pg->entries = i ? entries : entries_sp;
7185                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7186                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7187                 if (rc)
7188                         return rc;
7189                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7190         }
7191         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7192         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7193         if (rc) {
7194                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7195                            rc);
7196                 return rc;
7197         }
7198         ctx->flags |= BNXT_CTX_FLAG_INITED;
7199         return 0;
7200 }
7201
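     /* Query the min/max resource ranges (completion/TX/RX rings, ring
      * groups, L2 contexts, VNICs, stat contexts) from firmware.  With
      * @all false only max_tx_sch_inputs is refreshed, letting callers
      * poll that one value cheaply; the full query fills bp->hw_resc and,
      * on the PF, the VF reservation strategy.
      */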
7202 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7203 {
7204         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7205         struct hwrm_func_resource_qcaps_input req = {0};
7206         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7207         int rc;
7208
7209         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7210         req.fid = cpu_to_le16(0xffff);
7211
7212         mutex_lock(&bp->hwrm_cmd_lock);
7213         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7214                                        HWRM_CMD_TIMEOUT);
7215         if (rc)
7216                 goto hwrm_func_resc_qcaps_exit;
7217
7218         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7219         if (!all)
7220                 goto hwrm_func_resc_qcaps_exit;
7221
7222         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7223         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7224         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7225         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7226         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7227         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7228         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7229         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7230         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7231         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7232         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7233         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7234         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7235         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7236         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7237         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7238
7239         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7240                 u16 max_msix = le16_to_cpu(resp->max_msix);
7241
7242                 hw_resc->max_nqs = max_msix;
7243                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7244         }
7245
7246         if (BNXT_PF(bp)) {
7247                 struct bnxt_pf_info *pf = &bp->pf;
7248
7249                 pf->vf_resv_strategy =
7250                         le16_to_cpu(resp->vf_reservation_strategy);
7251                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7252                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7253         }
7254 hwrm_func_resc_qcaps_exit:
7255         mutex_unlock(&bp->hwrm_cmd_lock);
7256         return rc;
7257 }
7258
7259 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7260 {
7261         int rc = 0;
7262         struct hwrm_func_qcaps_input req = {0};
7263         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7264         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7265         u32 flags, flags_ext;
7266
7267         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7268         req.fid = cpu_to_le16(0xffff);
7269
7270         mutex_lock(&bp->hwrm_cmd_lock);
7271         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7272         if (rc)
7273                 goto hwrm_func_qcaps_exit;
7274
7275         flags = le32_to_cpu(resp->flags);
7276         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7277                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7278         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7279                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7280         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7281                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7282         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7283                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7284         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7285                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7286         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7287                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7288         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7289                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7290         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7291                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7292
7293         flags_ext = le32_to_cpu(resp->flags_ext);
7294         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7295                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7296
7297         bp->tx_push_thresh = 0;
7298         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7299             BNXT_FW_MAJ(bp) > 217)
7300                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7301
7302         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7303         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7304         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7305         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7306         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7307         if (!hw_resc->max_hw_ring_grps)
7308                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7309         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7310         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7311         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7312
7313         if (BNXT_PF(bp)) {
7314                 struct bnxt_pf_info *pf = &bp->pf;
7315
7316                 pf->fw_fid = le16_to_cpu(resp->fid);
7317                 pf->port_id = le16_to_cpu(resp->port_id);
7318                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7319                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7320                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7321                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7322                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7323                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7324                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7325                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7326                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7327                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7328                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7329                         bp->flags |= BNXT_FLAG_WOL_CAP;
7330         } else {
7331 #ifdef CONFIG_BNXT_SRIOV
7332                 struct bnxt_vf_info *vf = &bp->vf;
7333
7334                 vf->fw_fid = le16_to_cpu(resp->fid);
7335                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7336 #endif
7337         }
7338
7339 hwrm_func_qcaps_exit:
7340         mutex_unlock(&bp->hwrm_cmd_lock);
7341         return rc;
7342 }
7343
7344 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7345
7346 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7347 {
7348         int rc;
7349
7350         rc = __bnxt_hwrm_func_qcaps(bp);
7351         if (rc)
7352                 return rc;
7353         rc = bnxt_hwrm_queue_qportcfg(bp);
7354         if (rc) {
7355                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7356                 return rc;
7357         }
7358         if (bp->hwrm_spec_code >= 0x10803) {
7359                 rc = bnxt_alloc_ctx_mem(bp);
7360                 if (rc)
7361                         return rc;
7362                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7363                 if (!rc)
7364                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7365         }
7366         return 0;
7367 }
7368
7369 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7370 {
7371         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7372         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7373         int rc = 0;
7374         u32 flags;
7375
7376         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7377                 return 0;
7378
7379         resp = bp->hwrm_cmd_resp_addr;
7380         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7381
7382         mutex_lock(&bp->hwrm_cmd_lock);
7383         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7384         if (rc)
7385                 goto hwrm_cfa_adv_qcaps_exit;
7386
7387         flags = le32_to_cpu(resp->flags);
7388         if (flags &
7389             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7390                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7391
7392 hwrm_cfa_adv_qcaps_exit:
7393         mutex_unlock(&bp->hwrm_cmd_lock);
7394         return rc;
7395 }
7396
7397 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7398 {
7399         if (bp->fw_health)
7400                 return 0;
7401
7402         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7403         if (!bp->fw_health)
7404                 return -ENOMEM;
7405
7406         return 0;
7407 }
7408
7409 static int bnxt_alloc_fw_health(struct bnxt *bp)
7410 {
7411         int rc;
7412
7413         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7414             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7415                 return 0;
7416
7417         rc = __bnxt_alloc_fw_health(bp);
7418         if (rc) {
7419                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7420                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7421                 return rc;
7422         }
7423
7424         return 0;
7425 }
7426
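     /* Map the 4K GRC page containing @reg into the BAR0 window reserved
      * for firmware health monitoring (window 3, per the comment in
      * bnxt_map_fw_health_regs()), so the status registers can later be
      * read with a plain readl() at the fixed offset
      * BNXT_FW_HEALTH_WIN_OFF(reg) instead of moving a window per poll.
      */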
7427 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7428 {
7429         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7430                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7431                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7432 }
7433
7434 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7435 {
7436         void __iomem *hs;
7437         u32 status_loc;
7438         u32 reg_type;
7439         u32 sig;
7440
7441         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7442         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7443
7444         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7445         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7446                 if (bp->fw_health)
7447                         bp->fw_health->status_reliable = false;
7448                 return;
7449         }
7450
7451         if (__bnxt_alloc_fw_health(bp)) {
7452                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7453                 return;
7454         }
7455
7456         status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
7457         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7458         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7459         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7460                 __bnxt_map_fw_health_reg(bp, status_loc);
7461                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7462                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
7463         }
7464
7465         bp->fw_health->status_reliable = true;
7466 }
7467
7468 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7469 {
7470         struct bnxt_fw_health *fw_health = bp->fw_health;
7471         u32 reg_base = 0xffffffff;
7472         int i;
7473
7474         /* Only pre-map the monitoring GRC registers using window 3 */
7475         for (i = 0; i < 4; i++) {
7476                 u32 reg = fw_health->regs[i];
7477
7478                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7479                         continue;
7480                 if (reg_base == 0xffffffff)
7481                         reg_base = reg & BNXT_GRC_BASE_MASK;
7482                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7483                         return -ERANGE;
7484                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7485         }
7486         if (reg_base == 0xffffffff)
7487                 return 0;
7488
7489         __bnxt_map_fw_health_reg(bp, reg_base);
7490         return 0;
7491 }
7492
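     /* Fetch the firmware error-recovery policy: polling frequency, the
      * various wait periods (stored as _dsecs, i.e. 100 ms units per the
      * naming), the GRC locations of the health/heartbeat/reset-count
      * registers, and the register write sequence that performs the
      * reset.  The sequence length is capped at 16 to fit the
      * fw_reset_seq_* arrays.
      */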
7493 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7494 {
7495         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7496         struct bnxt_fw_health *fw_health = bp->fw_health;
7497         struct hwrm_error_recovery_qcfg_input req = {0};
7498         int rc, i;
7499
7500         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7501                 return 0;
7502
7503         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7504         mutex_lock(&bp->hwrm_cmd_lock);
7505         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7506         if (rc)
7507                 goto err_recovery_out;
7508         fw_health->flags = le32_to_cpu(resp->flags);
7509         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7510             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7511                 rc = -EINVAL;
7512                 goto err_recovery_out;
7513         }
7514         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7515         fw_health->master_func_wait_dsecs =
7516                 le32_to_cpu(resp->master_func_wait_period);
7517         fw_health->normal_func_wait_dsecs =
7518                 le32_to_cpu(resp->normal_func_wait_period);
7519         fw_health->post_reset_wait_dsecs =
7520                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7521         fw_health->post_reset_max_wait_dsecs =
7522                 le32_to_cpu(resp->max_bailout_time_after_reset);
7523         fw_health->regs[BNXT_FW_HEALTH_REG] =
7524                 le32_to_cpu(resp->fw_health_status_reg);
7525         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7526                 le32_to_cpu(resp->fw_heartbeat_reg);
7527         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7528                 le32_to_cpu(resp->fw_reset_cnt_reg);
7529         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7530                 le32_to_cpu(resp->reset_inprogress_reg);
7531         fw_health->fw_reset_inprog_reg_mask =
7532                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7533         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7534         if (fw_health->fw_reset_seq_cnt >= 16) {
7535                 rc = -EINVAL;
7536                 goto err_recovery_out;
7537         }
7538         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7539                 fw_health->fw_reset_seq_regs[i] =
7540                         le32_to_cpu(resp->reset_reg[i]);
7541                 fw_health->fw_reset_seq_vals[i] =
7542                         le32_to_cpu(resp->reset_reg_val[i]);
7543                 fw_health->fw_reset_seq_delay_msec[i] =
7544                         resp->delay_after_reset[i];
7545         }
7546 err_recovery_out:
7547         mutex_unlock(&bp->hwrm_cmd_lock);
7548         if (!rc)
7549                 rc = bnxt_map_fw_health_regs(bp);
7550         if (rc)
7551                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7552         return rc;
7553 }
7554
7555 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7556 {
7557         struct hwrm_func_reset_input req = {0};
7558
7559         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7560         req.enables = 0;
7561
7562         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7563 }
7564
7565 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7566 {
7567         struct hwrm_nvm_get_dev_info_output nvm_info;
7568
7569         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7570                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7571                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7572                          nvm_info.nvm_cfg_ver_upd);
7573 }
7574
7575 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7576 {
7577         int rc = 0;
7578         struct hwrm_queue_qportcfg_input req = {0};
7579         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7580         u8 i, j, *qptr;
7581         bool no_rdma;
7582
7583         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7584
7585         mutex_lock(&bp->hwrm_cmd_lock);
7586         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7587         if (rc)
7588                 goto qportcfg_exit;
7589
7590         if (!resp->max_configurable_queues) {
7591                 rc = -EINVAL;
7592                 goto qportcfg_exit;
7593         }
7594         bp->max_tc = resp->max_configurable_queues;
7595         bp->max_lltc = resp->max_configurable_lossless_queues;
7596         if (bp->max_tc > BNXT_MAX_QUEUE)
7597                 bp->max_tc = BNXT_MAX_QUEUE;
7598
7599         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7600         qptr = &resp->queue_id0;
7601         for (i = 0, j = 0; i < bp->max_tc; i++) {
7602                 bp->q_info[j].queue_id = *qptr;
7603                 bp->q_ids[i] = *qptr++;
7604                 bp->q_info[j].queue_profile = *qptr++;
7605                 bp->tc_to_qidx[j] = j;
7606                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7607                     (no_rdma && BNXT_PF(bp)))
7608                         j++;
7609         }
7610         bp->max_q = bp->max_tc;
7611         bp->max_tc = max_t(u8, j, 1);
7612
7613         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7614                 bp->max_tc = 1;
7615
7616         if (bp->max_lltc > bp->max_tc)
7617                 bp->max_lltc = bp->max_tc;
7618
7619 qportcfg_exit:
7620         mutex_unlock(&bp->hwrm_cmd_lock);
7621         return rc;
7622 }
7623
7624 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7625 {
7626         struct hwrm_ver_get_input req = {0};
7627         int rc;
7628
7629         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7630         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7631         req.hwrm_intf_min = HWRM_VERSION_MINOR;
7632         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7633
7634         rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7635                                    silent);
7636         return rc;
7637 }
7638
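     /* Query and cache the firmware and HWRM interface versions.  The
      * interface version is packed into bp->hwrm_spec_code as
      * maj << 16 | min << 8 | upd, so the literals compared against it
      * elsewhere read naturally: 0x10902 is spec 1.9.2, the level
      * required by bnxt_hwrm_func_backing_store_qcaps() above.
      */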
7639 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7640 {
7641         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7642         u16 fw_maj, fw_min, fw_bld, fw_rsv;
7643         u32 dev_caps_cfg, hwrm_ver;
7644         int rc, len;
7645
7646         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7647         mutex_lock(&bp->hwrm_cmd_lock);
7648         rc = __bnxt_hwrm_ver_get(bp, false);
7649         if (rc)
7650                 goto hwrm_ver_get_exit;
7651
7652         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7653
7654         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7655                              resp->hwrm_intf_min_8b << 8 |
7656                              resp->hwrm_intf_upd_8b;
7657         if (resp->hwrm_intf_maj_8b < 1) {
7658                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7659                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7660                             resp->hwrm_intf_upd_8b);
7661                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7662         }
7663
7664         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7665                         HWRM_VERSION_UPDATE;
7666
7667         if (bp->hwrm_spec_code > hwrm_ver)
7668                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7669                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7670                          HWRM_VERSION_UPDATE);
7671         else
7672                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7673                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7674                          resp->hwrm_intf_upd_8b);
7675
7676         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7677         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7678                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7679                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7680                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7681                 len = FW_VER_STR_LEN;
7682         } else {
7683                 fw_maj = resp->hwrm_fw_maj_8b;
7684                 fw_min = resp->hwrm_fw_min_8b;
7685                 fw_bld = resp->hwrm_fw_bld_8b;
7686                 fw_rsv = resp->hwrm_fw_rsvd_8b;
7687                 len = BC_HWRM_STR_LEN;
7688         }
7689         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7690         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7691                  fw_rsv);
7692
7693         if (strlen(resp->active_pkg_name)) {
7694                 int fw_ver_len = strlen(bp->fw_ver_str);
7695
7696                 snprintf(bp->fw_ver_str + fw_ver_len,
7697                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7698                          resp->active_pkg_name);
7699                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7700         }
7701
7702         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7703         if (!bp->hwrm_cmd_timeout)
7704                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7705
7706         if (resp->hwrm_intf_maj_8b >= 1) {
7707                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7708                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7709         }
7710         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7711                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7712
7713         bp->chip_num = le16_to_cpu(resp->chip_num);
7714         bp->chip_rev = resp->chip_rev;
7715         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7716             !resp->chip_metal)
7717                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7718
7719         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7720         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7721             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7722                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7723
7724         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7725                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7726
7727         if (dev_caps_cfg &
7728             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7729                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7730
7731         if (dev_caps_cfg &
7732             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7733                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7734
7735         if (dev_caps_cfg &
7736             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7737                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7738
7739 hwrm_ver_get_exit:
7740         mutex_unlock(&bp->hwrm_cmd_lock);
7741         return rc;
7742 }
7743
7744 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7745 {
7746         struct hwrm_fw_set_time_input req = {0};
7747         struct tm tm;
7748         time64_t now = ktime_get_real_seconds();
7749
7750         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7751             bp->hwrm_spec_code < 0x10400)
7752                 return -EOPNOTSUPP;
7753
7754         time64_to_tm(now, 0, &tm);
7755         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7756         req.year = cpu_to_le16(1900 + tm.tm_year);
7757         req.month = 1 + tm.tm_mon;
7758         req.day = tm.tm_mday;
7759         req.hour = tm.tm_hour;
7760         req.minute = tm.tm_min;
7761         req.second = tm.tm_sec;
7762         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7763 }
7764
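     /* Fold a narrow hardware counter into a monotonically increasing
      * 64-bit software counter.  @mask covers the hardware counter width
      * (e.g. 0xffffff for an illustrative 24-bit counter): the low bits
      * of *sw are replaced with @hw, and if @hw went backwards one full
      * wrap (mask + 1) is added.  Worked example:
      *
      *   *sw = 0x01fffffe, hw = 0x3, mask = 0xffffff
      *   sw_tmp = 0x01000003; hw < 0xfffffe, so +0x01000000 -> 0x02000003
      *
      * WRITE_ONCE() pairs with lockless readers of the accumulated stats.
      */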
7765 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
7766 {
7767         u64 sw_tmp;
7768
7769         sw_tmp = (*sw & ~mask) | hw;
7770         if (hw < (*sw & mask))
7771                 sw_tmp += mask + 1;
7772         WRITE_ONCE(*sw, sw_tmp);
7773 }
7774
7775 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
7776                                     int count, bool ignore_zero)
7777 {
7778         int i;
7779
7780         for (i = 0; i < count; i++) {
7781                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
7782
7783                 if (ignore_zero && !hw)
7784                         continue;
7785
7786                 if (masks[i] == -1ULL)
7787                         sw_stats[i] = hw;
7788                 else
7789                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
7790         }
7791 }
7792
7793 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
7794 {
7795         if (!stats->hw_stats)
7796                 return;
7797
7798         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7799                                 stats->hw_masks, stats->len / 8, false);
7800 }
7801
7802 static void bnxt_accumulate_all_stats(struct bnxt *bp)
7803 {
7804         struct bnxt_stats_mem *ring0_stats;
7805         bool ignore_zero = false;
7806         int i;
7807
7808         /* Chip bug.  Counter intermittently becomes 0. */
7809         if (bp->flags & BNXT_FLAG_CHIP_P5)
7810                 ignore_zero = true;
7811
7812         for (i = 0; i < bp->cp_nr_rings; i++) {
7813                 struct bnxt_napi *bnapi = bp->bnapi[i];
7814                 struct bnxt_cp_ring_info *cpr;
7815                 struct bnxt_stats_mem *stats;
7816
7817                 cpr = &bnapi->cp_ring;
7818                 stats = &cpr->stats;
7819                 if (!i)
7820                         ring0_stats = stats;
7821                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7822                                         ring0_stats->hw_masks,
7823                                         ring0_stats->len / 8, ignore_zero);
7824         }
7825         if (bp->flags & BNXT_FLAG_PORT_STATS) {
7826                 struct bnxt_stats_mem *stats = &bp->port_stats;
7827                 __le64 *hw_stats = stats->hw_stats;
7828                 u64 *sw_stats = stats->sw_stats;
7829                 u64 *masks = stats->hw_masks;
7830                 int cnt;
7831
7832                 cnt = sizeof(struct rx_port_stats) / 8;
7833                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
7834
7835                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7836                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7837                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7838                 cnt = sizeof(struct tx_port_stats) / 8;
7839                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
7840         }
7841         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
7842                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
7843                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
7844         }
7845 }
7846
7847 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
7848 {
7849         struct bnxt_pf_info *pf = &bp->pf;
7850         struct hwrm_port_qstats_input req = {0};
7851
7852         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7853                 return 0;
7854
7855         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7856                 return -EOPNOTSUPP;
7857
7858         req.flags = flags;
7859         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7860         req.port_id = cpu_to_le16(pf->port_id);
7861         req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
7862                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
7863         req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
7864         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7865 }
7866
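     /* Fetch extended RX/TX port statistics.  On a full (zero @flags)
      * query it also reads the priority-to-CoS-queue mapping so that
      * per-priority counters can be attributed to queues; per-port queue
      * IDs are spaced 10 apart (0, 10, 20, ...), hence the queue_id % 10
      * conversion below.
      */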
7867 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
7868 {
7869         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7870         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7871         struct hwrm_port_qstats_ext_input req = {0};
7872         struct bnxt_pf_info *pf = &bp->pf;
7873         u32 tx_stat_size;
7874         int rc;
7875
7876         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7877                 return 0;
7878
7879         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7880                 return -EOPNOTSUPP;
7881
7882         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7883         req.flags = flags;
7884         req.port_id = cpu_to_le16(pf->port_id);
7885         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7886         req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
7887         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
7888                        sizeof(struct tx_port_stats_ext) : 0;
7889         req.tx_stat_size = cpu_to_le16(tx_stat_size);
7890         req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
7891         mutex_lock(&bp->hwrm_cmd_lock);
7892         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7893         if (!rc) {
7894                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7895                 bp->fw_tx_stats_ext_size = tx_stat_size ?
7896                         le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7897         } else {
7898                 bp->fw_rx_stats_ext_size = 0;
7899                 bp->fw_tx_stats_ext_size = 0;
7900         }
7901         if (flags)
7902                 goto qstats_done;
7903
7904         if (bp->fw_tx_stats_ext_size <=
7905             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7906                 mutex_unlock(&bp->hwrm_cmd_lock);
7907                 bp->pri2cos_valid = 0;
7908                 return rc;
7909         }
7910
7911         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7912         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7913
7914         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7915         if (!rc) {
7916                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7917                 u8 *pri2cos;
7918                 int i, j;
7919
7920                 resp2 = bp->hwrm_cmd_resp_addr;
7921                 pri2cos = &resp2->pri0_cos_queue_id;
7922                 for (i = 0; i < 8; i++) {
7923                         u8 queue_id = pri2cos[i];
7924                         u8 queue_idx;
7925
7926                         /* Per port queue IDs start from 0, 10, 20, etc */
7927                         queue_idx = queue_id % 10;
7928                         if (queue_idx > BNXT_MAX_QUEUE) {
7929                                 bp->pri2cos_valid = false;
7930                                 goto qstats_done;
7931                         }
7932                         for (j = 0; j < bp->max_q; j++) {
7933                                 if (bp->q_ids[j] == queue_id)
7934                                         bp->pri2cos_idx[i] = queue_idx;
7935                         }
7936                 }
7937                 bp->pri2cos_valid = 1;
7938         }
7939 qstats_done:
7940         mutex_unlock(&bp->hwrm_cmd_lock);
7941         return rc;
7942 }
7943
7944 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7945 {
7946         if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
7947                 bnxt_hwrm_tunnel_dst_port_free(
7948                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7949         if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
7950                 bnxt_hwrm_tunnel_dst_port_free(
7951                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7952 }
7953
7954 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7955 {
7956         int rc, i;
7957         u32 tpa_flags = 0;
7958
7959         if (set_tpa)
7960                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7961         else if (BNXT_NO_FW_ACCESS(bp))
7962                 return 0;
7963         for (i = 0; i < bp->nr_vnics; i++) {
7964                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7965                 if (rc) {
7966                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
7967                                    i, rc);
7968                         return rc;
7969                 }
7970         }
7971         return 0;
7972 }
7973
7974 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7975 {
7976         int i;
7977
7978         for (i = 0; i < bp->nr_vnics; i++)
7979                 bnxt_hwrm_vnic_set_rss(bp, i, false);
7980 }
7981
7982 static void bnxt_clear_vnic(struct bnxt *bp)
7983 {
7984         if (!bp->vnic_info)
7985                 return;
7986
7987         bnxt_hwrm_clear_vnic_filter(bp);
7988         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7989                 /* clear all RSS settings before freeing the vnic ctx */
7990                 bnxt_hwrm_clear_vnic_rss(bp);
7991                 bnxt_hwrm_vnic_ctx_free(bp);
7992         }
7993         /* before freeing the vnic, undo the vnic TPA settings */
7994         if (bp->flags & BNXT_FLAG_TPA)
7995                 bnxt_set_tpa(bp, false);
7996         bnxt_hwrm_vnic_free(bp);
7997         if (bp->flags & BNXT_FLAG_CHIP_P5)
7998                 bnxt_hwrm_vnic_ctx_free(bp);
7999 }
8000
8001 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8002                                     bool irq_re_init)
8003 {
8004         bnxt_clear_vnic(bp);
8005         bnxt_hwrm_ring_free(bp, close_path);
8006         bnxt_hwrm_ring_grp_free(bp);
8007         if (irq_re_init) {
8008                 bnxt_hwrm_stat_ctx_free(bp);
8009                 bnxt_hwrm_free_tunnel_ports(bp);
8010         }
8011 }
8012
8013 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8014 {
8015         struct hwrm_func_cfg_input req = {0};
8016
8017         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8018         req.fid = cpu_to_le16(0xffff);
8019         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8020         if (br_mode == BRIDGE_MODE_VEB)
8021                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8022         else if (br_mode == BRIDGE_MODE_VEPA)
8023                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8024         else
8025                 return -EINVAL;
8026         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8027 }
8028
8029 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8030 {
8031         struct hwrm_func_cfg_input req = {0};
8032
8033         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8034                 return 0;
8035
8036         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8037         req.fid = cpu_to_le16(0xffff);
8038         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8039         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8040         if (size == 128)
8041                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8042
8043         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8044 }
8045
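     /* Bring up one VNIC on pre-P5 chips: allocate its RSS context (plus
      * a second context on Nitro A0), configure the default ring group,
      * program the RSS hash, and enable header-data split when
      * aggregation rings are in use.  The P5 variant below allocates one
      * context per block of RX rings (see bnxt_get_nr_rss_ctxs()) and
      * programs RSS with the P5-specific call instead.
      */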
8046 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8047 {
8048         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8049         int rc;
8050
8051         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8052                 goto skip_rss_ctx;
8053
8054         /* allocate context for vnic */
8055         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8056         if (rc) {
8057                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8058                            vnic_id, rc);
8059                 goto vnic_setup_err;
8060         }
8061         bp->rsscos_nr_ctxs++;
8062
8063         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8064                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8065                 if (rc) {
8066                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8067                                    vnic_id, rc);
8068                         goto vnic_setup_err;
8069                 }
8070                 bp->rsscos_nr_ctxs++;
8071         }
8072
8073 skip_rss_ctx:
8074         /* configure default vnic, ring grp */
8075         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8076         if (rc) {
8077                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8078                            vnic_id, rc);
8079                 goto vnic_setup_err;
8080         }
8081
8082         /* Enable RSS hashing on vnic */
8083         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8084         if (rc) {
8085                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8086                            vnic_id, rc);
8087                 goto vnic_setup_err;
8088         }
8089
8090         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8091                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8092                 if (rc) {
8093                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8094                                    vnic_id, rc);
8095                 }
8096         }
8097
8098 vnic_setup_err:
8099         return rc;
8100 }
8101
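/* On P5 chips the RSS indirection table is spread across multiple
 * contexts, so bnxt_get_nr_rss_ctxs() may return more than one context
 * for the current RX ring count; all of them must be allocated before
 * the RSS settings can be programmed.
 */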
8102 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8103 {
8104         int rc, i, nr_ctxs;
8105
8106         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8107         for (i = 0; i < nr_ctxs; i++) {
8108                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8109                 if (rc) {
8110                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8111                                    vnic_id, i, rc);
8112                         break;
8113                 }
8114                 bp->rsscos_nr_ctxs++;
8115         }
8116         if (i < nr_ctxs)
8117                 return -ENOMEM;
8118
8119         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8120         if (rc) {
8121                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8122                            vnic_id, rc);
8123                 return rc;
8124         }
8125         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8126         if (rc) {
8127                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8128                            vnic_id, rc);
8129                 return rc;
8130         }
8131         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8132                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8133                 if (rc) {
8134                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8135                                    vnic_id, rc);
8136                 }
8137         }
8138         return rc;
8139 }
8140
8141 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8142 {
8143         if (bp->flags & BNXT_FLAG_CHIP_P5)
8144                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8145         else
8146                 return __bnxt_setup_vnic(bp, vnic_id);
8147 }
8148
8149 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8150 {
8151 #ifdef CONFIG_RFS_ACCEL
8152         int i, rc = 0;
8153
8154         if (bp->flags & BNXT_FLAG_CHIP_P5)
8155                 return 0;
8156
8157         for (i = 0; i < bp->rx_nr_rings; i++) {
8158                 struct bnxt_vnic_info *vnic;
8159                 u16 vnic_id = i + 1;
8160                 u16 ring_id = i;
8161
8162                 if (vnic_id >= bp->nr_vnics)
8163                         break;
8164
8165                 vnic = &bp->vnic_info[vnic_id];
8166                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8167                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8168                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8169                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8170                 if (rc) {
8171                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8172                                    vnic_id, rc);
8173                         break;
8174                 }
8175                 rc = bnxt_setup_vnic(bp, vnic_id);
8176                 if (rc)
8177                         break;
8178         }
8179         return rc;
8180 #else
8181         return 0;
8182 #endif
8183 }
8184
8185 /* Allow PF and VF with default VLAN to be in promiscuous mode */
8186 static bool bnxt_promisc_ok(struct bnxt *bp)
8187 {
8188 #ifdef CONFIG_BNXT_SRIOV
8189         if (BNXT_VF(bp) && !bp->vf.vlan)
8190                 return false;
8191 #endif
8192         return true;
8193 }
8194
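/* Nitro A0 reserves its last RX ring for a special vnic (id 1);
 * allocate and configure that vnic here, after the default vnic has
 * been set up on the remaining rings.
 */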
8195 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8196 {
8197         int rc;
8198
8199         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8200         if (rc) {
8201                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8202                            rc);
8203                 return rc;
8204         }
8205
8206         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8207         if (rc) {
8208                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8209                            rc);
8210                 return rc;
8211         }
8212         return rc;
8213 }
8214
8215 static int bnxt_cfg_rx_mode(struct bnxt *);
8216 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8217
8218 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8219 {
8220         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8221         int rc = 0;
8222         unsigned int rx_nr_rings = bp->rx_nr_rings;
8223
8224         if (irq_re_init) {
8225                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8226                 if (rc) {
8227                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8228                                    rc);
8229                         goto err_out;
8230                 }
8231         }
8232
8233         rc = bnxt_hwrm_ring_alloc(bp);
8234         if (rc) {
8235                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8236                 goto err_out;
8237         }
8238
8239         rc = bnxt_hwrm_ring_grp_alloc(bp);
8240         if (rc) {
8241                 netdev_err(bp->dev, "hwrm ring grp alloc failure rc: %x\n", rc);
8242                 goto err_out;
8243         }
8244
8245         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8246                 rx_nr_rings--;
8247
8248         /* default vnic 0 */
8249         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8250         if (rc) {
8251                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8252                 goto err_out;
8253         }
8254
8255         rc = bnxt_setup_vnic(bp, 0);
8256         if (rc)
8257                 goto err_out;
8258
8259         if (bp->flags & BNXT_FLAG_RFS) {
8260                 rc = bnxt_alloc_rfs_vnics(bp);
8261                 if (rc)
8262                         goto err_out;
8263         }
8264
8265         if (bp->flags & BNXT_FLAG_TPA) {
8266                 rc = bnxt_set_tpa(bp, true);
8267                 if (rc)
8268                         goto err_out;
8269         }
8270
8271         if (BNXT_VF(bp))
8272                 bnxt_update_vf_mac(bp);
8273
8274         /* Filter for default vnic 0 */
8275         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8276         if (rc) {
8277                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8278                 goto err_out;
8279         }
8280         vnic->uc_filter_count = 1;
8281
8282         vnic->rx_mask = 0;
8283         if (bp->dev->flags & IFF_BROADCAST)
8284                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8285
8286         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8287                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8288
8289         if (bp->dev->flags & IFF_ALLMULTI) {
8290                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8291                 vnic->mc_list_count = 0;
8292         } else {
8293                 u32 mask = 0;
8294
8295                 bnxt_mc_list_updated(bp, &mask);
8296                 vnic->rx_mask |= mask;
8297         }
8298
8299         rc = bnxt_cfg_rx_mode(bp);
8300         if (rc)
8301                 goto err_out;
8302
8303         rc = bnxt_hwrm_set_coal(bp);
8304         if (rc)
8305                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8306                                 rc);
8307
8308         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8309                 rc = bnxt_setup_nitroa0_vnic(bp);
8310                 if (rc)
8311                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8312                                    rc);
8313         }
8314
8315         if (BNXT_VF(bp)) {
8316                 bnxt_hwrm_func_qcfg(bp);
8317                 netdev_update_features(bp->dev);
8318         }
8319
8320         return 0;
8321
8322 err_out:
8323         bnxt_hwrm_resource_free(bp, 0, true);
8324
8325         return rc;
8326 }
8327
8328 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8329 {
8330         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8331         return 0;
8332 }
8333
8334 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8335 {
8336         bnxt_init_cp_rings(bp);
8337         bnxt_init_rx_rings(bp);
8338         bnxt_init_tx_rings(bp);
8339         bnxt_init_ring_grps(bp, irq_re_init);
8340         bnxt_init_vnics(bp);
8341
8342         return bnxt_init_chip(bp, irq_re_init);
8343 }
8344
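/* Publish the ring counts to the networking stack.  XDP TX rings are
 * internal and excluded from the real TX queue count; with aRFS enabled
 * an IRQ CPU rmap is also allocated, one entry per RX ring.
 */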
8345 static int bnxt_set_real_num_queues(struct bnxt *bp)
8346 {
8347         int rc;
8348         struct net_device *dev = bp->dev;
8349
8350         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8351                                           bp->tx_nr_rings_xdp);
8352         if (rc)
8353                 return rc;
8354
8355         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8356         if (rc)
8357                 return rc;
8358
8359 #ifdef CONFIG_RFS_ACCEL
8360         if (bp->flags & BNXT_FLAG_RFS)
8361                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8362 #endif
8363
8364         return rc;
8365 }
8366
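/* Fit the requested RX and TX ring counts into "max" completion-capable
 * slots.  With shared completion rings each slot serves one RX and one
 * TX ring, so both counts are simply capped at max; otherwise the larger
 * count is decremented until rx + tx <= max.  For example (illustrative
 * numbers): rx = 8, tx = 4, max = 10, not shared -> rx is trimmed to 6
 * while tx stays at 4.
 */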
8367 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8368                            bool shared)
8369 {
8370         int _rx = *rx, _tx = *tx;
8371
8372         if (shared) {
8373                 *rx = min_t(int, _rx, max);
8374                 *tx = min_t(int, _tx, max);
8375         } else {
8376                 if (max < 2)
8377                         return -ENOMEM;
8378
8379                 while (_rx + _tx > max) {
8380                         if (_rx > _tx && _rx > 1)
8381                                 _rx--;
8382                         else if (_tx > 1)
8383                                 _tx--;
8384                 }
8385                 *rx = _rx;
8386                 *tx = _tx;
8387         }
8388         return 0;
8389 }
8390
8391 static void bnxt_setup_msix(struct bnxt *bp)
8392 {
8393         const int len = sizeof(bp->irq_tbl[0].name);
8394         struct net_device *dev = bp->dev;
8395         int tcs, i;
8396
8397         tcs = netdev_get_num_tc(dev);
8398         if (tcs) {
8399                 int off, count;
8400
8401                 for (i = 0; i < tcs; i++) {
8402                         count = bp->tx_nr_rings_per_tc;
8403                         off = i * count;
8404                         netdev_set_tc_queue(dev, i, count, off);
8405                 }
8406         }
8407
8408         for (i = 0; i < bp->cp_nr_rings; i++) {
8409                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8410                 char *attr;
8411
8412                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8413                         attr = "TxRx";
8414                 else if (i < bp->rx_nr_rings)
8415                         attr = "rx";
8416                 else
8417                         attr = "tx";
8418
8419                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8420                          attr, i);
8421                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8422         }
8423 }
8424
8425 static void bnxt_setup_inta(struct bnxt *bp)
8426 {
8427         const int len = sizeof(bp->irq_tbl[0].name);
8428
8429         if (netdev_get_num_tc(bp->dev))
8430                 netdev_reset_tc(bp->dev);
8431
8432         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8433                  0);
8434         bp->irq_tbl[0].handler = bnxt_inta;
8435 }
8436
8437 static int bnxt_setup_int_mode(struct bnxt *bp)
8438 {
8439         int rc;
8440
8441         if (bp->flags & BNXT_FLAG_USING_MSIX)
8442                 bnxt_setup_msix(bp);
8443         else
8444                 bnxt_setup_inta(bp);
8445
8446         rc = bnxt_set_real_num_queues(bp);
8447         return rc;
8448 }
8449
8450 #ifdef CONFIG_RFS_ACCEL
8451 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8452 {
8453         return bp->hw_resc.max_rsscos_ctxs;
8454 }
8455
8456 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8457 {
8458         return bp->hw_resc.max_vnics;
8459 }
8460 #endif
8461
8462 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8463 {
8464         return bp->hw_resc.max_stat_ctxs;
8465 }
8466
8467 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8468 {
8469         return bp->hw_resc.max_cp_rings;
8470 }
8471
8472 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8473 {
8474         unsigned int cp = bp->hw_resc.max_cp_rings;
8475
8476         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8477                 cp -= bnxt_get_ulp_msix_num(bp);
8478
8479         return cp;
8480 }
8481
8482 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8483 {
8484         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8485
8486         if (bp->flags & BNXT_FLAG_CHIP_P5)
8487                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8488
8489         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8490 }
8491
8492 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8493 {
8494         bp->hw_resc.max_irqs = max_irqs;
8495 }
8496
8497 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8498 {
8499         unsigned int cp;
8500
8501         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8502         if (bp->flags & BNXT_FLAG_CHIP_P5)
8503                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8504         else
8505                 return cp - bp->cp_nr_rings;
8506 }
8507
8508 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8509 {
8510         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8511 }
8512
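/* Estimate how many MSI-X vectors can still be granted on top of those
 * used by the L2 completion rings.  Without the new resource manager (or
 * when enough already-enabled vectors are spare) this is simply what is
 * left of bp->total_irqs; otherwise the request is clamped to what the
 * firmware can still provide (max_irq - cp_nr_rings).
 */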
8513 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8514 {
8515         int max_cp = bnxt_get_max_func_cp_rings(bp);
8516         int max_irq = bnxt_get_max_func_irqs(bp);
8517         int total_req = bp->cp_nr_rings + num;
8518         int max_idx, avail_msix;
8519
8520         max_idx = bp->total_irqs;
8521         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8522                 max_idx = min_t(int, bp->total_irqs, max_cp);
8523         avail_msix = max_idx - bp->cp_nr_rings;
8524         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8525                 return avail_msix;
8526
8527         if (max_irq < total_req) {
8528                 num = max_irq - bp->cp_nr_rings;
8529                 if (num <= 0)
8530                         return 0;
8531         }
8532         return num;
8533 }
8534
8535 static int bnxt_get_num_msix(struct bnxt *bp)
8536 {
8537         if (!BNXT_NEW_RM(bp))
8538                 return bnxt_get_max_func_irqs(bp);
8539
8540         return bnxt_nq_rings_in_use(bp);
8541 }
8542
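/* Bring up MSI-X: size the vector table via bnxt_get_num_msix(), ask PCI
 * for that many vectors (at least 2 unless rings are shared), then trim
 * the RX/TX ring counts to what was actually granted, leaving ulp_msix
 * vectors aside for the ULP (RDMA) driver.
 */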
8543 static int bnxt_init_msix(struct bnxt *bp)
8544 {
8545         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8546         struct msix_entry *msix_ent;
8547
8548         total_vecs = bnxt_get_num_msix(bp);
8549         max = bnxt_get_max_func_irqs(bp);
8550         if (total_vecs > max)
8551                 total_vecs = max;
8552
8553         if (!total_vecs)
8554                 return 0;
8555
8556         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8557         if (!msix_ent)
8558                 return -ENOMEM;
8559
8560         for (i = 0; i < total_vecs; i++) {
8561                 msix_ent[i].entry = i;
8562                 msix_ent[i].vector = 0;
8563         }
8564
8565         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8566                 min = 2;
8567
8568         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8569         ulp_msix = bnxt_get_ulp_msix_num(bp);
8570         if (total_vecs < 0 || total_vecs < ulp_msix) {
8571                 rc = -ENODEV;
8572                 goto msix_setup_exit;
8573         }
8574
8575         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8576         if (bp->irq_tbl) {
8577                 for (i = 0; i < total_vecs; i++)
8578                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8579
8580                 bp->total_irqs = total_vecs;
8581                 /* Trim the ring counts based on the number of vectors allocated */
8582                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8583                                      total_vecs - ulp_msix, min == 1);
8584                 if (rc)
8585                         goto msix_setup_exit;
8586
8587                 bp->cp_nr_rings = (min == 1) ?
8588                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8589                                   bp->tx_nr_rings + bp->rx_nr_rings;
8590
8591         } else {
8592                 rc = -ENOMEM;
8593                 goto msix_setup_exit;
8594         }
8595         bp->flags |= BNXT_FLAG_USING_MSIX;
8596         kfree(msix_ent);
8597         return 0;
8598
8599 msix_setup_exit:
8600         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8601         kfree(bp->irq_tbl);
8602         bp->irq_tbl = NULL;
8603         pci_disable_msix(bp->pdev);
8604         kfree(msix_ent);
8605         return rc;
8606 }
8607
8608 static int bnxt_init_inta(struct bnxt *bp)
8609 {
8610         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8611         if (!bp->irq_tbl)
8612                 return -ENOMEM;
8613
8614         bp->total_irqs = 1;
8615         bp->rx_nr_rings = 1;
8616         bp->tx_nr_rings = 1;
8617         bp->cp_nr_rings = 1;
8618         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8619         bp->irq_tbl[0].vector = bp->pdev->irq;
8620         return 0;
8621 }
8622
8623 static int bnxt_init_int_mode(struct bnxt *bp)
8624 {
8625         int rc = 0;
8626
8627         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8628                 rc = bnxt_init_msix(bp);
8629
8630         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8631                 /* fall back to INTA */
8632                 rc = bnxt_init_inta(bp);
8633         }
8634         return rc;
8635 }
8636
8637 static void bnxt_clear_int_mode(struct bnxt *bp)
8638 {
8639         if (bp->flags & BNXT_FLAG_USING_MSIX)
8640                 pci_disable_msix(bp->pdev);
8641
8642         kfree(bp->irq_tbl);
8643         bp->irq_tbl = NULL;
8644         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8645 }
8646
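/* Reserve rings with the firmware.  If the required MSI-X count would
 * change under the new resource manager, interrupts are torn down first
 * (with the ULP driver quiesced) and re-initialized once the reservation
 * succeeds.
 */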
8647 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8648 {
8649         int tcs = netdev_get_num_tc(bp->dev);
8650         bool irq_cleared = false;
8651         int rc;
8652
8653         if (!bnxt_need_reserve_rings(bp))
8654                 return 0;
8655
8656         if (irq_re_init && BNXT_NEW_RM(bp) &&
8657             bnxt_get_num_msix(bp) != bp->total_irqs) {
8658                 bnxt_ulp_irq_stop(bp);
8659                 bnxt_clear_int_mode(bp);
8660                 irq_cleared = true;
8661         }
8662         rc = __bnxt_reserve_rings(bp);
8663         if (irq_cleared) {
8664                 if (!rc)
8665                         rc = bnxt_init_int_mode(bp);
8666                 bnxt_ulp_irq_restart(bp, rc);
8667         }
8668         if (rc) {
8669                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8670                 return rc;
8671         }
8672         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8673                 netdev_err(bp->dev, "tx ring reservation failure\n");
8674                 netdev_reset_tc(bp->dev);
8675                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8676                 return -ENOMEM;
8677         }
8678         return 0;
8679 }
8680
8681 static void bnxt_free_irq(struct bnxt *bp)
8682 {
8683         struct bnxt_irq *irq;
8684         int i;
8685
8686 #ifdef CONFIG_RFS_ACCEL
8687         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8688         bp->dev->rx_cpu_rmap = NULL;
8689 #endif
8690         if (!bp->irq_tbl || !bp->bnapi)
8691                 return;
8692
8693         for (i = 0; i < bp->cp_nr_rings; i++) {
8694                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8695
8696                 irq = &bp->irq_tbl[map_idx];
8697                 if (irq->requested) {
8698                         if (irq->have_cpumask) {
8699                                 irq_set_affinity_hint(irq->vector, NULL);
8700                                 free_cpumask_var(irq->cpu_mask);
8701                                 irq->have_cpumask = 0;
8702                         }
8703                         free_irq(irq->vector, bp->bnapi[i]);
8704                 }
8705
8706                 irq->requested = 0;
8707         }
8708 }
8709
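/* Request one IRQ per completion ring, register RX vectors with the aRFS
 * CPU rmap when enabled, and spread affinity hints across NUMA-local
 * CPUs via cpumask_local_spread().
 */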
8710 static int bnxt_request_irq(struct bnxt *bp)
8711 {
8712         int i, j, rc = 0;
8713         unsigned long flags = 0;
8714 #ifdef CONFIG_RFS_ACCEL
8715         struct cpu_rmap *rmap;
8716 #endif
8717
8718         rc = bnxt_setup_int_mode(bp);
8719         if (rc) {
8720                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8721                            rc);
8722                 return rc;
8723         }
8724 #ifdef CONFIG_RFS_ACCEL
8725         rmap = bp->dev->rx_cpu_rmap;
8726 #endif
8727         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8728                 flags = IRQF_SHARED;
8729
8730         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8731                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8732                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8733
8734 #ifdef CONFIG_RFS_ACCEL
8735                 if (rmap && bp->bnapi[i]->rx_ring) {
8736                         rc = irq_cpu_rmap_add(rmap, irq->vector);
8737                         if (rc)
8738                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8739                                             j);
8740                         j++;
8741                 }
8742 #endif
8743                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8744                                  bp->bnapi[i]);
8745                 if (rc)
8746                         break;
8747
8748                 irq->requested = 1;
8749
8750                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8751                         int numa_node = dev_to_node(&bp->pdev->dev);
8752
8753                         irq->have_cpumask = 1;
8754                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8755                                         irq->cpu_mask);
8756                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8757                         if (rc) {
8758                                 netdev_warn(bp->dev,
8759                                             "Set affinity failed, IRQ = %d\n",
8760                                             irq->vector);
8761                                 break;
8762                         }
8763                 }
8764         }
8765         return rc;
8766 }
8767
8768 static void bnxt_del_napi(struct bnxt *bp)
8769 {
8770         int i;
8771
8772         if (!bp->bnapi)
8773                 return;
8774
8775         for (i = 0; i < bp->cp_nr_rings; i++) {
8776                 struct bnxt_napi *bnapi = bp->bnapi[i];
8777
8778                 __netif_napi_del(&bnapi->napi);
8779         }
8780         /* We called __netif_napi_del(), so we must let an RCU grace
8781          * period elapse before freeing the napi structures.
8782          */
8783         synchronize_net();
8784 }
8785
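/* Register the NAPI contexts: bnxt_poll_p5 on P5 chips, bnxt_poll
 * elsewhere, plus a dedicated bnxt_poll_nitroa0 instance for the extra
 * Nitro A0 completion ring; INTA mode uses a single shared context.
 */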
8786 static void bnxt_init_napi(struct bnxt *bp)
8787 {
8788         int i;
8789         unsigned int cp_nr_rings = bp->cp_nr_rings;
8790         struct bnxt_napi *bnapi;
8791
8792         if (bp->flags & BNXT_FLAG_USING_MSIX) {
8793                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8794
8795                 if (bp->flags & BNXT_FLAG_CHIP_P5)
8796                         poll_fn = bnxt_poll_p5;
8797                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8798                         cp_nr_rings--;
8799                 for (i = 0; i < cp_nr_rings; i++) {
8800                         bnapi = bp->bnapi[i];
8801                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8802                 }
8803                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8804                         bnapi = bp->bnapi[cp_nr_rings];
8805                         netif_napi_add(bp->dev, &bnapi->napi,
8806                                        bnxt_poll_nitroa0, 64);
8807                 }
8808         } else {
8809                 bnapi = bp->bnapi[0];
8810                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8811         }
8812 }
8813
8814 static void bnxt_disable_napi(struct bnxt *bp)
8815 {
8816         int i;
8817
8818         if (!bp->bnapi)
8819                 return;
8820
8821         for (i = 0; i < bp->cp_nr_rings; i++) {
8822                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8823
8824                 if (bp->bnapi[i]->rx_ring)
8825                         cancel_work_sync(&cpr->dim.work);
8826
8827                 napi_disable(&bp->bnapi[i]->napi);
8828         }
8829 }
8830
8831 static void bnxt_enable_napi(struct bnxt *bp)
8832 {
8833         int i;
8834
8835         for (i = 0; i < bp->cp_nr_rings; i++) {
8836                 struct bnxt_napi *bnapi = bp->bnapi[i];
8837                 struct bnxt_cp_ring_info *cpr;
8838
8839                 cpr = &bnapi->cp_ring;
8840                 if (bnapi->in_reset)
8841                         cpr->sw_stats.rx.rx_resets++;
8842                 bnapi->in_reset = false;
8843
8844                 if (bnapi->rx_ring) {
8845                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8846                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8847                 }
8848                 napi_enable(&bnapi->napi);
8849         }
8850 }
8851
8852 void bnxt_tx_disable(struct bnxt *bp)
8853 {
8854         int i;
8855         struct bnxt_tx_ring_info *txr;
8856
8857         if (bp->tx_ring) {
8858                 for (i = 0; i < bp->tx_nr_rings; i++) {
8859                         txr = &bp->tx_ring[i];
8860                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
8861                 }
8862         }
8863         /* Stop all TX queues */
8864         netif_tx_disable(bp->dev);
8865         netif_carrier_off(bp->dev);
8866 }
8867
8868 void bnxt_tx_enable(struct bnxt *bp)
8869 {
8870         int i;
8871         struct bnxt_tx_ring_info *txr;
8872
8873         for (i = 0; i < bp->tx_nr_rings; i++) {
8874                 txr = &bp->tx_ring[i];
8875                 txr->dev_state = 0;
8876         }
8877         netif_tx_wake_all_queues(bp->dev);
8878         if (bp->link_info.link_up)
8879                 netif_carrier_on(bp->dev);
8880 }
8881
8882 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
8883 {
8884         u8 active_fec = link_info->active_fec_sig_mode &
8885                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
8886
8887         switch (active_fec) {
8888         default:
8889         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
8890                 return "None";
8891         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
8892                 return "Clause 74 BaseR";
8893         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
8894                 return "Clause 91 RS(528,514)";
8895         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
8896                 return "Clause 91 RS544_1XN";
8897         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
8898                 return "Clause 91 RS(544,514)";
8899         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
8900                 return "Clause 91 RS272_1XN";
8901         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
8902                 return "Clause 91 RS(272,257)";
8903         }
8904 }
8905
8906 static void bnxt_report_link(struct bnxt *bp)
8907 {
8908         if (bp->link_info.link_up) {
8909                 const char *duplex;
8910                 const char *flow_ctrl;
8911                 u32 speed;
8912                 u16 fec;
8913
8914                 netif_carrier_on(bp->dev);
8915                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8916                 if (speed == SPEED_UNKNOWN) {
8917                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
8918                         return;
8919                 }
8920                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8921                         duplex = "full";
8922                 else
8923                         duplex = "half";
8924                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8925                         flow_ctrl = "ON - receive & transmit";
8926                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8927                         flow_ctrl = "ON - transmit";
8928                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8929                         flow_ctrl = "ON - receive";
8930                 else
8931                         flow_ctrl = "none";
8932                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8933                             speed, duplex, flow_ctrl);
8934                 if (bp->flags & BNXT_FLAG_EEE_CAP)
8935                         netdev_info(bp->dev, "EEE is %s\n",
8936                                     bp->eee.eee_active ? "active" :
8937                                                          "not active");
8938                 fec = bp->link_info.fec_cfg;
8939                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8940                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
8941                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8942                                     bnxt_report_fec(&bp->link_info));
8943         } else {
8944                 netif_carrier_off(bp->dev);
8945                 netdev_err(bp->dev, "NIC Link is Down\n");
8946         }
8947 }
8948
8949 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
8950 {
8951         if (!resp->supported_speeds_auto_mode &&
8952             !resp->supported_speeds_force_mode &&
8953             !resp->supported_pam4_speeds_auto_mode &&
8954             !resp->supported_pam4_speeds_force_mode)
8955                 return true;
8956         return false;
8957 }
8958
8959 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8960 {
8961         int rc = 0;
8962         struct hwrm_port_phy_qcaps_input req = {0};
8963         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8964         struct bnxt_link_info *link_info = &bp->link_info;
8965
8966         bp->flags &= ~BNXT_FLAG_EEE_CAP;
8967         if (bp->test_info)
8968                 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
8969                                           BNXT_TEST_FL_AN_PHY_LPBK);
8970         if (bp->hwrm_spec_code < 0x10201)
8971                 return 0;
8972
8973         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8974
8975         mutex_lock(&bp->hwrm_cmd_lock);
8976         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8977         if (rc)
8978                 goto hwrm_phy_qcaps_exit;
8979
8980         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8981                 struct ethtool_eee *eee = &bp->eee;
8982                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8983
8984                 bp->flags |= BNXT_FLAG_EEE_CAP;
8985                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8986                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8987                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8988                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8989                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8990         }
8991         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8992                 if (bp->test_info)
8993                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8994         }
8995         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
8996                 if (bp->test_info)
8997                         bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
8998         }
8999         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
9000                 if (BNXT_PF(bp))
9001                         bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
9002         }
9003         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
9004                 bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
9005
9006         if (bp->hwrm_spec_code >= 0x10a01) {
9007                 if (bnxt_phy_qcaps_no_speed(resp)) {
9008                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9009                         netdev_warn(bp->dev, "Ethernet link disabled\n");
9010                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9011                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9012                         netdev_info(bp->dev, "Ethernet link enabled\n");
9013                         /* Phy re-enabled, reprobe the speeds */
9014                         link_info->support_auto_speeds = 0;
9015                         link_info->support_pam4_auto_speeds = 0;
9016                 }
9017         }
9018         if (resp->supported_speeds_auto_mode)
9019                 link_info->support_auto_speeds =
9020                         le16_to_cpu(resp->supported_speeds_auto_mode);
9021         if (resp->supported_pam4_speeds_auto_mode)
9022                 link_info->support_pam4_auto_speeds =
9023                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9024
9025         bp->port_count = resp->port_cnt;
9026
9027 hwrm_phy_qcaps_exit:
9028         mutex_unlock(&bp->hwrm_cmd_lock);
9029         return rc;
9030 }
9031
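/* True if "advertising" contains any speed bit that "supported" lacks:
 * the XOR flags every mismatched bit, and OR-ing those mismatches back
 * into "supported" only changes it when "advertising" had a bit that
 * "supported" did not.  E.g. (illustrative values) advertising = 0b0110,
 * supported = 0b0010: diff = 0b0100 and supported | diff = 0b0110, which
 * differs from supported, so a speed was dropped.
 */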
9032 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9033 {
9034         u16 diff = advertising ^ supported;
9035
9036         return ((supported | diff) != supported);
9037 }
9038
9039 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9040 {
9041         int rc = 0;
9042         struct bnxt_link_info *link_info = &bp->link_info;
9043         struct hwrm_port_phy_qcfg_input req = {0};
9044         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9045         u8 link_up = link_info->link_up;
9046         bool support_changed = false;
9047
9048         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9049
9050         mutex_lock(&bp->hwrm_cmd_lock);
9051         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9052         if (rc) {
9053                 mutex_unlock(&bp->hwrm_cmd_lock);
9054                 return rc;
9055         }
9056
9057         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9058         link_info->phy_link_status = resp->link;
9059         link_info->duplex = resp->duplex_cfg;
9060         if (bp->hwrm_spec_code >= 0x10800)
9061                 link_info->duplex = resp->duplex_state;
9062         link_info->pause = resp->pause;
9063         link_info->auto_mode = resp->auto_mode;
9064         link_info->auto_pause_setting = resp->auto_pause;
9065         link_info->lp_pause = resp->link_partner_adv_pause;
9066         link_info->force_pause_setting = resp->force_pause;
9067         link_info->duplex_setting = resp->duplex_cfg;
9068         if (link_info->phy_link_status == BNXT_LINK_LINK)
9069                 link_info->link_speed = le16_to_cpu(resp->link_speed);
9070         else
9071                 link_info->link_speed = 0;
9072         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9073         link_info->force_pam4_link_speed =
9074                 le16_to_cpu(resp->force_pam4_link_speed);
9075         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9076         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9077         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9078         link_info->auto_pam4_link_speeds =
9079                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9080         link_info->lp_auto_link_speeds =
9081                 le16_to_cpu(resp->link_partner_adv_speeds);
9082         link_info->lp_auto_pam4_link_speeds =
9083                 resp->link_partner_pam4_adv_speeds;
9084         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9085         link_info->phy_ver[0] = resp->phy_maj;
9086         link_info->phy_ver[1] = resp->phy_min;
9087         link_info->phy_ver[2] = resp->phy_bld;
9088         link_info->media_type = resp->media_type;
9089         link_info->phy_type = resp->phy_type;
9090         link_info->transceiver = resp->xcvr_pkg_type;
9091         link_info->phy_addr = resp->eee_config_phy_addr &
9092                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9093         link_info->module_status = resp->module_status;
9094
9095         if (bp->flags & BNXT_FLAG_EEE_CAP) {
9096                 struct ethtool_eee *eee = &bp->eee;
9097                 u16 fw_speeds;
9098
9099                 eee->eee_active = 0;
9100                 if (resp->eee_config_phy_addr &
9101                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9102                         eee->eee_active = 1;
9103                         fw_speeds = le16_to_cpu(
9104                                 resp->link_partner_adv_eee_link_speed_mask);
9105                         eee->lp_advertised =
9106                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9107                 }
9108
9109                 /* Pull initial EEE config */
9110                 if (!chng_link_state) {
9111                         if (resp->eee_config_phy_addr &
9112                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9113                                 eee->eee_enabled = 1;
9114
9115                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9116                         eee->advertised =
9117                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9118
9119                         if (resp->eee_config_phy_addr &
9120                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9121                                 __le32 tmr;
9122
9123                                 eee->tx_lpi_enabled = 1;
9124                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9125                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9126                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9127                         }
9128                 }
9129         }
9130
9131         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9132         if (bp->hwrm_spec_code >= 0x10504) {
9133                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9134                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9135         }
9136         /* TODO: need to add more logic to report VF link */
9137         if (chng_link_state) {
9138                 if (link_info->phy_link_status == BNXT_LINK_LINK)
9139                         link_info->link_up = 1;
9140                 else
9141                         link_info->link_up = 0;
9142                 if (link_up != link_info->link_up)
9143                         bnxt_report_link(bp);
9144         } else {
9145                 /* always link down if not required to update link state */
9146                 link_info->link_up = 0;
9147         }
9148         mutex_unlock(&bp->hwrm_cmd_lock);
9149
9150         if (!BNXT_PHY_CFG_ABLE(bp))
9151                 return 0;
9152
9153         /* Check if any advertised speeds are no longer supported. The caller
9154          * holds the link_lock mutex, so we can modify link_info settings.
9155          */
9156         if (bnxt_support_dropped(link_info->advertising,
9157                                  link_info->support_auto_speeds)) {
9158                 link_info->advertising = link_info->support_auto_speeds;
9159                 support_changed = true;
9160         }
9161         if (bnxt_support_dropped(link_info->advertising_pam4,
9162                                  link_info->support_pam4_auto_speeds)) {
9163                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9164                 support_changed = true;
9165         }
9166         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9167                 bnxt_hwrm_set_link_setting(bp, true, false);
9168         return 0;
9169 }
9170
9171 static void bnxt_get_port_module_status(struct bnxt *bp)
9172 {
9173         struct bnxt_link_info *link_info = &bp->link_info;
9174         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9175         u8 module_status;
9176
9177         if (bnxt_update_link(bp, true))
9178                 return;
9179
9180         module_status = link_info->module_status;
9181         switch (module_status) {
9182         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9183         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9184         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9185                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9186                             bp->pf.port_id);
9187                 if (bp->hwrm_spec_code >= 0x10201) {
9188                         netdev_warn(bp->dev, "Module part number %s\n",
9189                                     resp->phy_vendor_partnumber);
9190                 }
9191                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9192                         netdev_warn(bp->dev, "TX is disabled\n");
9193                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9194                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9195         }
9196 }
9197
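/* Translate the requested flow-control mode into the phy_cfg request:
 * with autoneg, the RX/TX pause bits are advertised via auto_pause;
 * without it, they are forced directly (and, on HWRM spec 1.2.1+, the
 * forced bits are mirrored into auto_pause as well).
 */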
9198 static void
9199 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9200 {
9201         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9202                 if (bp->hwrm_spec_code >= 0x10201)
9203                         req->auto_pause =
9204                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9205                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9206                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9207                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9208                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9209                 req->enables |=
9210                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9211         } else {
9212                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9213                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9214                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9215                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9216                 req->enables |=
9217                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9218                 if (bp->hwrm_spec_code >= 0x10201) {
9219                         req->auto_pause = req->force_pause;
9220                         req->enables |= cpu_to_le32(
9221                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9222                 }
9223         }
9224 }
9225
9226 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9227 {
9228         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9229                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9230                 if (bp->link_info.advertising) {
9231                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9232                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9233                 }
9234                 if (bp->link_info.advertising_pam4) {
9235                         req->enables |=
9236                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9237                         req->auto_link_pam4_speed_mask =
9238                                 cpu_to_le16(bp->link_info.advertising_pam4);
9239                 }
9240                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9241                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9242         } else {
9243                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9244                 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9245                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9246                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9247                 } else {
9248                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9249                 }
9250         }
9251
9252         /* tell the firmware (ChiMP) that the setting takes effect immediately */
9253         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9254 }
9255
9256 int bnxt_hwrm_set_pause(struct bnxt *bp)
9257 {
9258         struct hwrm_port_phy_cfg_input req = {0};
9259         int rc;
9260
9261         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9262         bnxt_hwrm_set_pause_common(bp, &req);
9263
9264         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9265             bp->link_info.force_link_chng)
9266                 bnxt_hwrm_set_link_common(bp, &req);
9267
9268         mutex_lock(&bp->hwrm_cmd_lock);
9269         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9270         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9271                 /* since changing the pause setting doesn't trigger any link
9272                  * change event, the driver needs to update the current pause
9273                  * result upon successful return of the phy_cfg command
9274                  */
9275                 bp->link_info.pause =
9276                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9277                 bp->link_info.auto_pause_setting = 0;
9278                 if (!bp->link_info.force_link_chng)
9279                         bnxt_report_link(bp);
9280         }
9281         bp->link_info.force_link_chng = false;
9282         mutex_unlock(&bp->hwrm_cmd_lock);
9283         return rc;
9284 }
9285
9286 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9287                               struct hwrm_port_phy_cfg_input *req)
9288 {
9289         struct ethtool_eee *eee = &bp->eee;
9290
9291         if (eee->eee_enabled) {
9292                 u16 eee_speeds;
9293                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9294
9295                 if (eee->tx_lpi_enabled)
9296                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9297                 else
9298                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9299
9300                 req->flags |= cpu_to_le32(flags);
9301                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9302                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9303                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9304         } else {
9305                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9306         }
9307 }
9308
9309 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9310 {
9311         struct hwrm_port_phy_cfg_input req = {0};
9312
9313         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9314         if (set_pause)
9315                 bnxt_hwrm_set_pause_common(bp, &req);
9316
9317         bnxt_hwrm_set_link_common(bp, &req);
9318
9319         if (set_eee)
9320                 bnxt_hwrm_set_eee(bp, &req);
9321         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9322 }
9323
9324 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9325 {
9326         struct hwrm_port_phy_cfg_input req = {0};
9327
9328         if (!BNXT_SINGLE_PF(bp))
9329                 return 0;
9330
9331         if (pci_num_vf(bp->pdev))
9332                 return 0;
9333
9334         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9335         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9336         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9337 }
9338
9339 static int bnxt_fw_init_one(struct bnxt *bp);
9340
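/* Tell the firmware about ifup/ifdown transitions.  On the "up" path the
 * response flags reveal whether a hot firmware reset completed while the
 * NIC was down (forcing context/DCB teardown and full re-init) or merely
 * that resources changed, in which case the cached reservations are
 * zeroed so they get re-queried.
 */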
9341 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9342 {
9343         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9344         struct hwrm_func_drv_if_change_input req = {0};
9345         bool resc_reinit = false, fw_reset = false;
9346         u32 flags = 0;
9347         int rc;
9348
9349         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9350                 return 0;
9351
9352         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9353         if (up)
9354                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9355         mutex_lock(&bp->hwrm_cmd_lock);
9356         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9357         if (!rc)
9358                 flags = le32_to_cpu(resp->flags);
9359         mutex_unlock(&bp->hwrm_cmd_lock);
9360         if (rc)
9361                 return rc;
9362
9363         if (!up)
9364                 return 0;
9365
9366         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9367                 resc_reinit = true;
9368         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9369                 fw_reset = true;
9370
9371         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9372                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9373                 return -ENODEV;
9374         }
9375         if (resc_reinit || fw_reset) {
9376                 if (fw_reset) {
9377                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9378                                 bnxt_ulp_stop(bp);
9379                         bnxt_free_ctx_mem(bp);
9380                         kfree(bp->ctx);
9381                         bp->ctx = NULL;
9382                         bnxt_dcb_free(bp);
9383                         rc = bnxt_fw_init_one(bp);
9384                         if (rc) {
9385                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9386                                 return rc;
9387                         }
9388                         bnxt_clear_int_mode(bp);
9389                         rc = bnxt_init_int_mode(bp);
9390                         if (rc) {
9391                                 netdev_err(bp->dev, "init int mode failed\n");
9392                                 return rc;
9393                         }
9394                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9395                 }
9396                 if (BNXT_NEW_RM(bp)) {
9397                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9398
9399                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9400                         hw_resc->resv_cp_rings = 0;
9401                         hw_resc->resv_stat_ctxs = 0;
9402                         hw_resc->resv_irqs = 0;
9403                         hw_resc->resv_tx_rings = 0;
9404                         hw_resc->resv_rx_rings = 0;
9405                         hw_resc->resv_hw_ring_grps = 0;
9406                         hw_resc->resv_vnics = 0;
9407                         if (!fw_reset) {
9408                                 bp->tx_nr_rings = 0;
9409                                 bp->rx_nr_rings = 0;
9410                         }
9411                 }
9412         }
9413         return 0;
9414 }
9415
9416 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9417 {
9418         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9419         struct hwrm_port_led_qcaps_input req = {0};
9420         struct bnxt_pf_info *pf = &bp->pf;
9421         int rc;
9422
9423         bp->num_leds = 0;
9424         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9425                 return 0;
9426
9427         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9428         req.port_id = cpu_to_le16(pf->port_id);
9429         mutex_lock(&bp->hwrm_cmd_lock);
9430         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9431         if (rc) {
9432                 mutex_unlock(&bp->hwrm_cmd_lock);
9433                 return rc;
9434         }
9435         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9436                 int i;
9437
9438                 bp->num_leds = resp->num_leds;
9439                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9440                                                  bp->num_leds);
9441                 for (i = 0; i < bp->num_leds; i++) {
9442                         struct bnxt_led_info *led = &bp->leds[i];
9443                         __le16 caps = led->led_state_caps;
9444
9445                         if (!led->led_group_id ||
9446                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
9447                                 bp->num_leds = 0;
9448                                 break;
9449                         }
9450                 }
9451         }
9452         mutex_unlock(&bp->hwrm_cmd_lock);
9453         return 0;
9454 }
9455
9456 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9457 {
9458         struct hwrm_wol_filter_alloc_input req = {0};
9459         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9460         int rc;
9461
9462         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9463         req.port_id = cpu_to_le16(bp->pf.port_id);
9464         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9465         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9466         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9467         mutex_lock(&bp->hwrm_cmd_lock);
9468         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9469         if (!rc)
9470                 bp->wol_filter_id = resp->wol_filter_id;
9471         mutex_unlock(&bp->hwrm_cmd_lock);
9472         return rc;
9473 }
9474
9475 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9476 {
9477         struct hwrm_wol_filter_free_input req = {0};
9478
9479         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9480         req.port_id = cpu_to_le16(bp->pf.port_id);
9481         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9482         req.wol_filter_id = bp->wol_filter_id;
9483         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9484 }
9485
9486 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9487 {
9488         struct hwrm_wol_filter_qcfg_input req = {0};
9489         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9490         u16 next_handle = 0;
9491         int rc;
9492
9493         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9494         req.port_id = cpu_to_le16(bp->pf.port_id);
9495         req.handle = cpu_to_le16(handle);
9496         mutex_lock(&bp->hwrm_cmd_lock);
9497         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9498         if (!rc) {
9499                 next_handle = le16_to_cpu(resp->next_handle);
9500                 if (next_handle != 0) {
9501                         if (resp->wol_type ==
9502                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9503                                 bp->wol = 1;
9504                                 bp->wol_filter_id = resp->wol_filter_id;
9505                         }
9506                 }
9507         }
9508         mutex_unlock(&bp->hwrm_cmd_lock);
9509         return next_handle;
9510 }
9511
9512 static void bnxt_get_wol_settings(struct bnxt *bp)
9513 {
9514         u16 handle = 0;
9515
9516         bp->wol = 0;
9517         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9518                 return;
9519
9520         do {
9521                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9522         } while (handle && handle != 0xffff);
9523 }
9524
9525 #ifdef CONFIG_BNXT_HWMON
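/* hwmon temp1_input attribute: read the chip temperature via
 * HWRM_TEMP_MONITOR_QUERY and report it in millidegrees Celsius.
 */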
9526 static ssize_t bnxt_show_temp(struct device *dev,
9527                               struct device_attribute *devattr, char *buf)
9528 {
9529         struct hwrm_temp_monitor_query_input req = {0};
9530         struct hwrm_temp_monitor_query_output *resp;
9531         struct bnxt *bp = dev_get_drvdata(dev);
9532         u32 len = 0;
9533         int rc;
9534
9535         resp = bp->hwrm_cmd_resp_addr;
9536         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9537         mutex_lock(&bp->hwrm_cmd_lock);
9538         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9539         if (!rc)
9540                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegrees Celsius */
9541         mutex_unlock(&bp->hwrm_cmd_lock);
9542         return rc ?: len;
9543 }
9544 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9545
9546 static struct attribute *bnxt_attrs[] = {
9547         &sensor_dev_attr_temp1_input.dev_attr.attr,
9548         NULL
9549 };
9550 ATTRIBUTE_GROUPS(bnxt);
9551
9552 static void bnxt_hwmon_close(struct bnxt *bp)
9553 {
9554         if (bp->hwmon_dev) {
9555                 hwmon_device_unregister(bp->hwmon_dev);
9556                 bp->hwmon_dev = NULL;
9557         }
9558 }
9559
9560 static void bnxt_hwmon_open(struct bnxt *bp)
9561 {
9562         struct hwrm_temp_monitor_query_input req = {0};
9563         struct pci_dev *pdev = bp->pdev;
9564         int rc;
9565
9566         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9567         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9568         if (rc == -EACCES || rc == -EOPNOTSUPP) {
9569                 bnxt_hwmon_close(bp);
9570                 return;
9571         }
9572
9573         if (bp->hwmon_dev)
9574                 return;
9575
9576         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9577                                                           DRV_MODULE_NAME, bp,
9578                                                           bnxt_groups);
9579         if (IS_ERR(bp->hwmon_dev)) {
9580                 bp->hwmon_dev = NULL;
9581                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9582         }
9583 }
9584 #else
9585 static void bnxt_hwmon_close(struct bnxt *bp)
9586 {
9587 }
9588
9589 static void bnxt_hwmon_open(struct bnxt *bp)
9590 {
9591 }
9592 #endif
9593
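/* Check that the EEE settings are consistent with the autoneg state.
 * EEE requires speed autonegotiation, and the EEE advertisement must be a
 * subset of the advertised link speeds.  Returns false (after fixing up
 * the EEE state) when the PHY needs to be reconfigured.
 */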
9594 static bool bnxt_eee_config_ok(struct bnxt *bp)
9595 {
9596         struct ethtool_eee *eee = &bp->eee;
9597         struct bnxt_link_info *link_info = &bp->link_info;
9598
9599         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9600                 return true;
9601
9602         if (eee->eee_enabled) {
9603                 u32 advertising =
9604                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9605
9606                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9607                         eee->eee_enabled = 0;
9608                         return false;
9609                 }
9610                 if (eee->advertised & ~advertising) {
9611                         eee->advertised = advertising & eee->supported;
9612                         return false;
9613                 }
9614         }
9615         return true;
9616 }
9617
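/* Compare the requested link, pause, and EEE settings against the state
 * reported by the firmware and reconfigure the PHY only when something
 * has changed.
 */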
9618 static int bnxt_update_phy_setting(struct bnxt *bp)
9619 {
9620         int rc;
9621         bool update_link = false;
9622         bool update_pause = false;
9623         bool update_eee = false;
9624         struct bnxt_link_info *link_info = &bp->link_info;
9625
9626         rc = bnxt_update_link(bp, true);
9627         if (rc) {
9628                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9629                            rc);
9630                 return rc;
9631         }
9632         if (!BNXT_SINGLE_PF(bp))
9633                 return 0;
9634
9635         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9636             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9637             link_info->req_flow_ctrl)
9638                 update_pause = true;
9639         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9640             link_info->force_pause_setting != link_info->req_flow_ctrl)
9641                 update_pause = true;
9642         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9643                 if (BNXT_AUTO_MODE(link_info->auto_mode))
9644                         update_link = true;
9645                 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
9646                     link_info->req_link_speed != link_info->force_link_speed)
9647                         update_link = true;
9648                 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
9649                          link_info->req_link_speed != link_info->force_pam4_link_speed)
9650                         update_link = true;
9651                 if (link_info->req_duplex != link_info->duplex_setting)
9652                         update_link = true;
9653         } else {
9654                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9655                         update_link = true;
9656                 if (link_info->advertising != link_info->auto_link_speeds ||
9657                     link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
9658                         update_link = true;
9659         }
9660
9661         /* The last close may have shut down the link, so we need to call
9662          * PHY_CFG to bring it back up.
9663          */
9664         if (!bp->link_info.link_up)
9665                 update_link = true;
9666
9667         if (!bnxt_eee_config_ok(bp))
9668                 update_eee = true;
9669
9670         if (update_link)
9671                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9672         else if (update_pause)
9673                 rc = bnxt_hwrm_set_pause(bp);
9674         if (rc) {
9675                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9676                            rc);
9677                 return rc;
9678         }
9679
9680         return rc;
9681 }
9682
9683 /* Common routine to pre-map certain register blocks to different GRC windows.
9684  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
9685  * in the PF and 3 windows in the VF can be customized to map different
9686  * register blocks.
9687  */
9688 static void bnxt_preset_reg_win(struct bnxt *bp)
9689 {
9690         if (BNXT_PF(bp)) {
9691                 /* CAG registers map to GRC window #4 */
9692                 writel(BNXT_CAG_REG_BASE,
9693                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9694         }
9695 }
9696
9697 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9698
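/* Core open path: reserve rings, allocate memory and IRQs, initialize the
 * NIC, then bring up NAPI, interrupts, and the TX queues.  Callers hold
 * rtnl_lock.
 */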
9699 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9700 {
9701         int rc = 0;
9702
9703         bnxt_preset_reg_win(bp);
9704         netif_carrier_off(bp->dev);
9705         if (irq_re_init) {
9706                 /* Reserve rings now if none were reserved at driver probe. */
9707                 rc = bnxt_init_dflt_ring_mode(bp);
9708                 if (rc) {
9709                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9710                         return rc;
9711                 }
9712         }
9713         rc = bnxt_reserve_rings(bp, irq_re_init);
9714         if (rc)
9715                 return rc;
9716         if ((bp->flags & BNXT_FLAG_RFS) &&
9717             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9718                 /* disable RFS if falling back to INTA */
9719                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9720                 bp->flags &= ~BNXT_FLAG_RFS;
9721         }
9722
9723         rc = bnxt_alloc_mem(bp, irq_re_init);
9724         if (rc) {
9725                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9726                 goto open_err_free_mem;
9727         }
9728
9729         if (irq_re_init) {
9730                 bnxt_init_napi(bp);
9731                 rc = bnxt_request_irq(bp);
9732                 if (rc) {
9733                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9734                         goto open_err_irq;
9735                 }
9736         }
9737
9738         rc = bnxt_init_nic(bp, irq_re_init);
9739         if (rc) {
9740                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9741                 goto open_err_irq;
9742         }
9743
9744         bnxt_enable_napi(bp);
9745         bnxt_debug_dev_init(bp);
9746
9747         if (link_re_init) {
9748                 mutex_lock(&bp->link_lock);
9749                 rc = bnxt_update_phy_setting(bp);
9750                 mutex_unlock(&bp->link_lock);
9751                 if (rc) {
9752                         netdev_warn(bp->dev, "failed to update phy settings\n");
9753                         if (BNXT_SINGLE_PF(bp)) {
9754                                 bp->link_info.phy_retry = true;
9755                                 bp->link_info.phy_retry_expires =
9756                                         jiffies + 5 * HZ;
9757                         }
9758                 }
9759         }
9760
9761         if (irq_re_init)
9762                 udp_tunnel_nic_reset_ntf(bp->dev);
9763
9764         set_bit(BNXT_STATE_OPEN, &bp->state);
9765         bnxt_enable_int(bp);
9766         /* Enable TX queues */
9767         bnxt_tx_enable(bp);
9768         mod_timer(&bp->timer, jiffies + bp->current_interval);
9769         /* Poll link status and check for SFP+ module status */
9770         bnxt_get_port_module_status(bp);
9771
9772         /* VF-reps may need to be re-opened after the PF is re-opened */
9773         if (BNXT_PF(bp))
9774                 bnxt_vf_reps_open(bp);
9775         return 0;
9776
9777 open_err_irq:
9778         bnxt_del_napi(bp);
9779
9780 open_err_free_mem:
9781         bnxt_free_skbs(bp);
9782         bnxt_free_irq(bp);
9783         bnxt_free_mem(bp, true);
9784         return rc;
9785 }
9786
9787 /* rtnl_lock held */
9788 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9789 {
9790         int rc = 0;
9791
9792         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9793         if (rc) {
9794                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9795                 dev_close(bp->dev);
9796         }
9797         return rc;
9798 }
9799
9800 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
9801  * leaving NAPI, IRQ, and TX disabled.  This is mainly used for offline
9802  * self tests.
9803  */
9804 int bnxt_half_open_nic(struct bnxt *bp)
9805 {
9806         int rc = 0;
9807
9808         rc = bnxt_alloc_mem(bp, false);
9809         if (rc) {
9810                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9811                 goto half_open_err;
9812         }
9813         rc = bnxt_init_nic(bp, false);
9814         if (rc) {
9815                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9816                 goto half_open_err;
9817         }
9818         return 0;
9819
9820 half_open_err:
9821         bnxt_free_skbs(bp);
9822         bnxt_free_mem(bp, false);
9823         dev_close(bp->dev);
9824         return rc;
9825 }
9826
9827 /* rtnl_lock held, this call can only be made after a previous successful
9828  * call to bnxt_half_open_nic().
9829  */
9830 void bnxt_half_close_nic(struct bnxt *bp)
9831 {
9832         bnxt_hwrm_resource_free(bp, false, false);
9833         bnxt_free_skbs(bp);
9834         bnxt_free_mem(bp, false);
9835 }
9836
9837 static void bnxt_reenable_sriov(struct bnxt *bp)
9838 {
9839         if (BNXT_PF(bp)) {
9840                 struct bnxt_pf_info *pf = &bp->pf;
9841                 int n = pf->active_vfs;
9842
9843                 if (n)
9844                         bnxt_cfg_hw_sriov(bp, &n, true);
9845         }
9846 }
9847
9848 static int bnxt_open(struct net_device *dev)
9849 {
9850         struct bnxt *bp = netdev_priv(dev);
9851         int rc;
9852
9853         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9854                 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9855                 return -ENODEV;
9856         }
9857
9858         rc = bnxt_hwrm_if_change(bp, true);
9859         if (rc)
9860                 return rc;
9861         rc = __bnxt_open_nic(bp, true, true);
9862         if (rc) {
9863                 bnxt_hwrm_if_change(bp, false);
9864         } else {
9865                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9866                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9867                                 bnxt_ulp_start(bp, 0);
9868                                 bnxt_reenable_sriov(bp);
9869                         }
9870                 }
9871                 bnxt_hwmon_open(bp);
9872         }
9873
9874         return rc;
9875 }
9876
9877 static bool bnxt_drv_busy(struct bnxt *bp)
9878 {
9879         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9880                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9881 }
9882
9883 static void bnxt_get_ring_stats(struct bnxt *bp,
9884                                 struct rtnl_link_stats64 *stats);
9885
9886 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9887                              bool link_re_init)
9888 {
9889         /* Close the VF-reps before closing PF */
9890         if (BNXT_PF(bp))
9891                 bnxt_vf_reps_close(bp);
9892
9893         /* Change device state to avoid TX queue wake-ups */
9894         bnxt_tx_disable(bp);
9895
9896         clear_bit(BNXT_STATE_OPEN, &bp->state);
9897         smp_mb__after_atomic();
9898         while (bnxt_drv_busy(bp))
9899                 msleep(20);
9900
9901         /* Flush rings and disable interrupts */
9902         bnxt_shutdown_nic(bp, irq_re_init);
9903
9904         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9905
9906         bnxt_debug_dev_exit(bp);
9907         bnxt_disable_napi(bp);
9908         del_timer_sync(&bp->timer);
9909         bnxt_free_skbs(bp);
9910
9911         /* Save ring stats before shutdown */
9912         if (bp->bnapi && irq_re_init)
9913                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
9914         if (irq_re_init) {
9915                 bnxt_free_irq(bp);
9916                 bnxt_del_napi(bp);
9917         }
9918         bnxt_free_mem(bp, irq_re_init);
9919 }
9920
9921 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9922 {
9923         int rc = 0;
9924
9925         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9926                 /* If we get here, it means firmware reset is in progress
9927                  * while we are trying to close.  We can safely proceed with
9928                  * the close because we are holding rtnl_lock().  Some firmware
9929                  * messages may fail as we proceed to close.  We set the
9930                  * ABORT_ERR flag here so that the FW reset thread will later
9931                  * abort when it gets the rtnl_lock() and sees the flag.
9932                  */
9933                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9934                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9935         }
9936
9937 #ifdef CONFIG_BNXT_SRIOV
9938         if (bp->sriov_cfg) {
9939                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9940                                                       !bp->sriov_cfg,
9941                                                       BNXT_SRIOV_CFG_WAIT_TMO);
9942                 if (rc)
9943                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9944         }
9945 #endif
9946         __bnxt_close_nic(bp, irq_re_init, link_re_init);
9947         return rc;
9948 }
9949
9950 static int bnxt_close(struct net_device *dev)
9951 {
9952         struct bnxt *bp = netdev_priv(dev);
9953
9954         bnxt_hwmon_close(bp);
9955         bnxt_close_nic(bp, true, true);
9956         bnxt_hwrm_shutdown_link(bp);
9957         bnxt_hwrm_if_change(bp, false);
9958         return 0;
9959 }
9960
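/* Read a PHY register over MDIO through the firmware.  Clause 45
 * addressing is used when @phy_addr encodes a C45 address.
 */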
9961 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9962                                    u16 *val)
9963 {
9964         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9965         struct hwrm_port_phy_mdio_read_input req = {0};
9966         int rc;
9967
9968         if (bp->hwrm_spec_code < 0x10a00)
9969                 return -EOPNOTSUPP;
9970
9971         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9972         req.port_id = cpu_to_le16(bp->pf.port_id);
9973         req.phy_addr = phy_addr;
9974         req.reg_addr = cpu_to_le16(reg & 0x1f);
9975         if (mdio_phy_id_is_c45(phy_addr)) {
9976                 req.cl45_mdio = 1;
9977                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9978                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9979                 req.reg_addr = cpu_to_le16(reg);
9980         }
9981
9982         mutex_lock(&bp->hwrm_cmd_lock);
9983         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9984         if (!rc)
9985                 *val = le16_to_cpu(resp->reg_data);
9986         mutex_unlock(&bp->hwrm_cmd_lock);
9987         return rc;
9988 }
9989
9990 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9991                                     u16 val)
9992 {
9993         struct hwrm_port_phy_mdio_write_input req = {0};
9994
9995         if (bp->hwrm_spec_code < 0x10a00)
9996                 return -EOPNOTSUPP;
9997
9998         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9999         req.port_id = cpu_to_le16(bp->pf.port_id);
10000         req.phy_addr = phy_addr;
10001         req.reg_addr = cpu_to_le16(reg & 0x1f);
10002         if (mdio_phy_id_is_c45(phy_addr)) {
10003                 req.cl45_mdio = 1;
10004                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10005                 req.dev_addr = mdio_phy_id_devad(phy_addr);
10006                 req.reg_addr = cpu_to_le16(reg);
10007         }
10008         req.reg_data = cpu_to_le16(val);
10009
10010         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10011 }
10012
10013 /* rtnl_lock held */
10014 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10015 {
10016         struct mii_ioctl_data *mdio = if_mii(ifr);
10017         struct bnxt *bp = netdev_priv(dev);
10018         int rc;
10019
10020         switch (cmd) {
10021         case SIOCGMIIPHY:
10022                 mdio->phy_id = bp->link_info.phy_addr;
10023
10024                 fallthrough;
10025         case SIOCGMIIREG: {
10026                 u16 mii_regval = 0;
10027
10028                 if (!netif_running(dev))
10029                         return -EAGAIN;
10030
10031                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10032                                              &mii_regval);
10033                 mdio->val_out = mii_regval;
10034                 return rc;
10035         }
10036
10037         case SIOCSMIIREG:
10038                 if (!netif_running(dev))
10039                         return -EAGAIN;
10040
10041                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10042                                                 mdio->val_in);
10043
10044         default:
10045                 /* do nothing */
10046                 break;
10047         }
10048         return -EOPNOTSUPP;
10049 }
10050
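/* Sum the per-completion-ring software counters into @stats.  Ring
 * counters reset when the rings are freed, so the totals saved in
 * bp->net_stats_prev are added back by bnxt_add_prev_stats().
 */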
10051 static void bnxt_get_ring_stats(struct bnxt *bp,
10052                                 struct rtnl_link_stats64 *stats)
10053 {
10054         int i;
10055
10056         for (i = 0; i < bp->cp_nr_rings; i++) {
10057                 struct bnxt_napi *bnapi = bp->bnapi[i];
10058                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10059                 u64 *sw = cpr->stats.sw_stats;
10060
10061                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10062                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10063                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10064
10065                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10066                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10067                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10068
10069                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10070                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10071                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10072
10073                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10074                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10075                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10076
10077                 stats->rx_missed_errors +=
10078                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10079
10080                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10081
10082                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10083         }
10084 }
10085
10086 static void bnxt_add_prev_stats(struct bnxt *bp,
10087                                 struct rtnl_link_stats64 *stats)
10088 {
10089         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10090
10091         stats->rx_packets += prev_stats->rx_packets;
10092         stats->tx_packets += prev_stats->tx_packets;
10093         stats->rx_bytes += prev_stats->rx_bytes;
10094         stats->tx_bytes += prev_stats->tx_bytes;
10095         stats->rx_missed_errors += prev_stats->rx_missed_errors;
10096         stats->multicast += prev_stats->multicast;
10097         stats->tx_dropped += prev_stats->tx_dropped;
10098 }
10099
10100 static void
10101 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10102 {
10103         struct bnxt *bp = netdev_priv(dev);
10104
10105         set_bit(BNXT_STATE_READ_STATS, &bp->state);
10106         /* Make sure bnxt_close_nic() sees that we are reading stats before
10107          * we check the BNXT_STATE_OPEN flag.
10108          */
10109         smp_mb__after_atomic();
10110         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10111                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10112                 *stats = bp->net_stats_prev;
10113                 return;
10114         }
10115
10116         bnxt_get_ring_stats(bp, stats);
10117         bnxt_add_prev_stats(bp, stats);
10118
10119         if (bp->flags & BNXT_FLAG_PORT_STATS) {
10120                 u64 *rx = bp->port_stats.sw_stats;
10121                 u64 *tx = bp->port_stats.sw_stats +
10122                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10123
10124                 stats->rx_crc_errors =
10125                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10126                 stats->rx_frame_errors =
10127                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10128                 stats->rx_length_errors =
10129                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10130                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10131                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10132                 stats->rx_errors =
10133                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10134                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10135                 stats->collisions =
10136                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10137                 stats->tx_fifo_errors =
10138                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10139                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10140         }
10141         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10142 }
10143
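/* Copy the device multicast list into the default VNIC and return true
 * if it changed.  With more than BNXT_MAX_MC_ADDRS entries, fall back to
 * the ALL_MCAST rx mask instead of individual entries.
 */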
10144 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10145 {
10146         struct net_device *dev = bp->dev;
10147         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10148         struct netdev_hw_addr *ha;
10149         u8 *haddr;
10150         int mc_count = 0;
10151         bool update = false;
10152         int off = 0;
10153
10154         netdev_for_each_mc_addr(ha, dev) {
10155                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10156                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10157                         vnic->mc_list_count = 0;
10158                         return false;
10159                 }
10160                 haddr = ha->addr;
10161                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10162                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10163                         update = true;
10164                 }
10165                 off += ETH_ALEN;
10166                 mc_count++;
10167         }
10168         if (mc_count)
10169                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10170
10171         if (mc_count != vnic->mc_list_count) {
10172                 vnic->mc_list_count = mc_count;
10173                 update = true;
10174         }
10175         return update;
10176 }
10177
10178 static bool bnxt_uc_list_updated(struct bnxt *bp)
10179 {
10180         struct net_device *dev = bp->dev;
10181         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10182         struct netdev_hw_addr *ha;
10183         int off = 0;
10184
10185         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10186                 return true;
10187
10188         netdev_for_each_uc_addr(ha, dev) {
10189                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10190                         return true;
10191
10192                 off += ETH_ALEN;
10193         }
10194         return false;
10195 }
10196
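/* ndo_set_rx_mode handler.  This runs with the address list lock held
 * and cannot sleep, so it only computes the new rx mask and defers the
 * firmware update to the sp workqueue via BNXT_RX_MASK_SP_EVENT.
 */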
10197 static void bnxt_set_rx_mode(struct net_device *dev)
10198 {
10199         struct bnxt *bp = netdev_priv(dev);
10200         struct bnxt_vnic_info *vnic;
10201         bool mc_update = false;
10202         bool uc_update;
10203         u32 mask;
10204
10205         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10206                 return;
10207
10208         vnic = &bp->vnic_info[0];
10209         mask = vnic->rx_mask;
10210         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10211                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10212                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10213                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10214
10215         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
10216                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10217
10218         uc_update = bnxt_uc_list_updated(bp);
10219
10220         if (dev->flags & IFF_BROADCAST)
10221                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10222         if (dev->flags & IFF_ALLMULTI) {
10223                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10224                 vnic->mc_list_count = 0;
10225         } else {
10226                 mc_update = bnxt_mc_list_updated(bp, &mask);
10227         }
10228
10229         if (mask != vnic->rx_mask || uc_update || mc_update) {
10230                 vnic->rx_mask = mask;
10231
10232                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10233                 bnxt_queue_sp_work(bp);
10234         }
10235 }
10236
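/* Apply the rx mode computed by bnxt_set_rx_mode() from process context:
 * re-program the unicast filters if the list changed, then update the rx
 * mask.  Falls back to promiscuous mode when there are more unicast
 * addresses than filters, and to ALL_MCAST if the MC mask update fails.
 */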
10237 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10238 {
10239         struct net_device *dev = bp->dev;
10240         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10241         struct netdev_hw_addr *ha;
10242         int i, off = 0, rc;
10243         bool uc_update;
10244
10245         netif_addr_lock_bh(dev);
10246         uc_update = bnxt_uc_list_updated(bp);
10247         netif_addr_unlock_bh(dev);
10248
10249         if (!uc_update)
10250                 goto skip_uc;
10251
10252         mutex_lock(&bp->hwrm_cmd_lock);
10253         for (i = 1; i < vnic->uc_filter_count; i++) {
10254                 struct hwrm_cfa_l2_filter_free_input req = {0};
10255
10256                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10257                                        -1);
10258
10259                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10260
10261                 rc = _hwrm_send_message(bp, &req, sizeof(req),
10262                                         HWRM_CMD_TIMEOUT);
10263         }
10264         mutex_unlock(&bp->hwrm_cmd_lock);
10265
10266         vnic->uc_filter_count = 1;
10267
10268         netif_addr_lock_bh(dev);
10269         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10270                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10271         } else {
10272                 netdev_for_each_uc_addr(ha, dev) {
10273                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10274                         off += ETH_ALEN;
10275                         vnic->uc_filter_count++;
10276                 }
10277         }
10278         netif_addr_unlock_bh(dev);
10279
10280         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10281                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10282                 if (rc) {
10283                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10284                                    rc);
10285                         vnic->uc_filter_count = i;
10286                         return rc;
10287                 }
10288         }
10289
10290 skip_uc:
10291         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10292         if (rc && vnic->mc_list_count) {
10293                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10294                             rc);
10295                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10296                 vnic->mc_list_count = 0;
10297                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10298         }
10299         if (rc)
10300                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10301                            rc);
10302
10303         return rc;
10304 }
10305
10306 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10307 {
10308 #ifdef CONFIG_BNXT_SRIOV
10309         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10310                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10311
10312                 /* If no minimum rings were provisioned by the PF, don't
10313                  * reserve rings by default when the device is down.
10314                  */
10315                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10316                         return true;
10317
10318                 if (!netif_running(bp->dev))
10319                         return false;
10320         }
10321 #endif
10322         return true;
10323 }
10324
10325 /* If the chip and firmware support RFS */
10326 static bool bnxt_rfs_supported(struct bnxt *bp)
10327 {
10328         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10329                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10330                         return true;
10331                 return false;
10332         }
10333         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10334                 return true;
10335         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10336                 return true;
10337         return false;
10338 }
10339
10340 /* If runtime conditions support RFS */
10341 static bool bnxt_rfs_capable(struct bnxt *bp)
10342 {
10343 #ifdef CONFIG_RFS_ACCEL
10344         int vnics, max_vnics, max_rss_ctxs;
10345
10346         if (bp->flags & BNXT_FLAG_CHIP_P5)
10347                 return bnxt_rfs_supported(bp);
10348         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10349                 return false;
10350
10351         vnics = 1 + bp->rx_nr_rings;
10352         max_vnics = bnxt_get_max_func_vnics(bp);
10353         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10354
10355         /* RSS contexts not a limiting factor */
10356         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10357                 max_rss_ctxs = max_vnics;
10358         if (vnics > max_vnics || vnics > max_rss_ctxs) {
10359                 if (bp->rx_nr_rings > 1)
10360                         netdev_warn(bp->dev,
10361                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10362                                     min(max_rss_ctxs - 1, max_vnics - 1));
10363                 return false;
10364         }
10365
10366         if (!BNXT_NEW_RM(bp))
10367                 return true;
10368
10369         if (vnics == bp->hw_resc.resv_vnics)
10370                 return true;
10371
10372         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10373         if (vnics <= bp->hw_resc.resv_vnics)
10374                 return true;
10375
10376         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10377         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10378         return false;
10379 #else
10380         return false;
10381 #endif
10382 }
10383
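/* ndo_fix_features: resolve conflicting feature requests.  GRO_HW and LRO
 * are mutually exclusive, aggregation-dependent features are cleared when
 * aggregation rings are unavailable, and CTAG/STAG RX VLAN acceleration
 * must be toggled together.
 */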
10384 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10385                                            netdev_features_t features)
10386 {
10387         struct bnxt *bp = netdev_priv(dev);
10388         netdev_features_t vlan_features;
10389
10390         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10391                 features &= ~NETIF_F_NTUPLE;
10392
10393         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10394                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10395
10396         if (!(features & NETIF_F_GRO))
10397                 features &= ~NETIF_F_GRO_HW;
10398
10399         if (features & NETIF_F_GRO_HW)
10400                 features &= ~NETIF_F_LRO;
10401
10402         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10403          * turned on or off together.
10404          */
10405         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10406         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10407                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10408                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10409                 else if (vlan_features)
10410                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10411         }
10412 #ifdef CONFIG_BNXT_SRIOV
10413         if (BNXT_VF(bp) && bp->vf.vlan)
10414                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10415 #endif
10416         return features;
10417 }
10418
10419 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10420 {
10421         struct bnxt *bp = netdev_priv(dev);
10422         u32 flags = bp->flags;
10423         u32 changes;
10424         int rc = 0;
10425         bool re_init = false;
10426         bool update_tpa = false;
10427
10428         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10429         if (features & NETIF_F_GRO_HW)
10430                 flags |= BNXT_FLAG_GRO;
10431         else if (features & NETIF_F_LRO)
10432                 flags |= BNXT_FLAG_LRO;
10433
10434         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10435                 flags &= ~BNXT_FLAG_TPA;
10436
10437         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10438                 flags |= BNXT_FLAG_STRIP_VLAN;
10439
10440         if (features & NETIF_F_NTUPLE)
10441                 flags |= BNXT_FLAG_RFS;
10442
10443         changes = flags ^ bp->flags;
10444         if (changes & BNXT_FLAG_TPA) {
10445                 update_tpa = true;
10446                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10447                     (flags & BNXT_FLAG_TPA) == 0 ||
10448                     (bp->flags & BNXT_FLAG_CHIP_P5))
10449                         re_init = true;
10450         }
10451
10452         if (changes & ~BNXT_FLAG_TPA)
10453                 re_init = true;
10454
10455         if (flags != bp->flags) {
10456                 u32 old_flags = bp->flags;
10457
10458                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10459                         bp->flags = flags;
10460                         if (update_tpa)
10461                                 bnxt_set_ring_params(bp);
10462                         return rc;
10463                 }
10464
10465                 if (re_init) {
10466                         bnxt_close_nic(bp, false, false);
10467                         bp->flags = flags;
10468                         if (update_tpa)
10469                                 bnxt_set_ring_params(bp);
10470
10471                         return bnxt_open_nic(bp, false, false);
10472                 }
10473                 if (update_tpa) {
10474                         bp->flags = flags;
10475                         rc = bnxt_set_tpa(bp,
10476                                           (flags & BNXT_FLAG_TPA) ?
10477                                           true : false);
10478                         if (rc)
10479                                 bp->flags = old_flags;
10480                 }
10481         }
10482         return rc;
10483 }
10484
10485 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
10486                          u32 *reg_buf)
10487 {
10488         struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
10489         struct hwrm_dbg_read_direct_input req = {0};
10490         __le32 *dbg_reg_buf;
10491         dma_addr_t mapping;
10492         int rc, i;
10493
10494         dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
10495                                          &mapping, GFP_KERNEL);
10496         if (!dbg_reg_buf)
10497                 return -ENOMEM;
10498         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
10499         req.host_dest_addr = cpu_to_le64(mapping);
10500         req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
10501         req.read_len32 = cpu_to_le32(num_words);
10502         mutex_lock(&bp->hwrm_cmd_lock);
10503         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10504         if (rc || resp->error_code) {
10505                 rc = -EIO;
10506                 goto dbg_rd_reg_exit;
10507         }
10508         for (i = 0; i < num_words; i++)
10509                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
10510
10511 dbg_rd_reg_exit:
10512         mutex_unlock(&bp->hwrm_cmd_lock);
10513         dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
10514         return rc;
10515 }
10516
10517 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
10518                                        u32 ring_id, u32 *prod, u32 *cons)
10519 {
10520         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
10521         struct hwrm_dbg_ring_info_get_input req = {0};
10522         int rc;
10523
10524         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
10525         req.ring_type = ring_type;
10526         req.fw_ring_id = cpu_to_le32(ring_id);
10527         mutex_lock(&bp->hwrm_cmd_lock);
10528         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10529         if (!rc) {
10530                 *prod = le32_to_cpu(resp->producer_index);
10531                 *cons = le32_to_cpu(resp->consumer_index);
10532         }
10533         mutex_unlock(&bp->hwrm_cmd_lock);
10534         return rc;
10535 }
10536
10537 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
10538 {
10539         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
10540         int i = bnapi->index;
10541
10542         if (!txr)
10543                 return;
10544
10545         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
10546                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
10547                     txr->tx_cons);
10548 }
10549
10550 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
10551 {
10552         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
10553         int i = bnapi->index;
10554
10555         if (!rxr)
10556                 return;
10557
10558         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
10559                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
10560                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
10561                     rxr->rx_sw_agg_prod);
10562 }
10563
10564 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
10565 {
10566         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10567         int i = bnapi->index;
10568
10569         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
10570                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
10571 }
10572
10573 static void bnxt_dbg_dump_states(struct bnxt *bp)
10574 {
10575         int i;
10576         struct bnxt_napi *bnapi;
10577
10578         for (i = 0; i < bp->cp_nr_rings; i++) {
10579                 bnapi = bp->bnapi[i];
10580                 if (netif_msg_drv(bp)) {
10581                         bnxt_dump_tx_sw_state(bnapi);
10582                         bnxt_dump_rx_sw_state(bnapi);
10583                         bnxt_dump_cp_sw_state(bnapi);
10584                 }
10585         }
10586 }
10587
10588 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
10589 {
10590         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
10591         struct hwrm_ring_reset_input req = {0};
10592         struct bnxt_napi *bnapi = rxr->bnapi;
10593         struct bnxt_cp_ring_info *cpr;
10594         u16 cp_ring_id;
10595
10596         cpr = &bnapi->cp_ring;
10597         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
10598         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
10599         req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
10600         req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
10601         return hwrm_send_message_silent(bp, &req, sizeof(req),
10602                                         HWRM_CMD_TIMEOUT);
10603 }
10604
10605 static void bnxt_reset_task(struct bnxt *bp, bool silent)
10606 {
10607         if (!silent)
10608                 bnxt_dbg_dump_states(bp);
10609         if (netif_running(bp->dev)) {
10610                 int rc;
10611
10612                 if (silent) {
10613                         bnxt_close_nic(bp, false, false);
10614                         bnxt_open_nic(bp, false, false);
10615                 } else {
10616                         bnxt_ulp_stop(bp);
10617                         bnxt_close_nic(bp, true, false);
10618                         rc = bnxt_open_nic(bp, true, false);
10619                         bnxt_ulp_start(bp, rc);
10620                 }
10621         }
10622 }
10623
10624 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
10625 {
10626         struct bnxt *bp = netdev_priv(dev);
10627
10628         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
10629         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
10630         bnxt_queue_sp_work(bp);
10631 }
10632
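/* Called from the periodic timer to detect dead firmware.  If the
 * heartbeat register has stopped advancing, or the reset counter changed
 * unexpectedly, schedule a firmware exception reset.
 */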
10633 static void bnxt_fw_health_check(struct bnxt *bp)
10634 {
10635         struct bnxt_fw_health *fw_health = bp->fw_health;
10636         u32 val;
10637
10638         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10639                 return;
10640
10641         if (fw_health->tmr_counter) {
10642                 fw_health->tmr_counter--;
10643                 return;
10644         }
10645
10646         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10647         if (val == fw_health->last_fw_heartbeat)
10648                 goto fw_reset;
10649
10650         fw_health->last_fw_heartbeat = val;
10651
10652         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10653         if (val != fw_health->last_fw_reset_cnt)
10654                 goto fw_reset;
10655
10656         fw_health->tmr_counter = fw_health->tmr_multiplier;
10657         return;
10658
10659 fw_reset:
10660         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10661         bnxt_queue_sp_work(bp);
10662 }
10663
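/* Periodic housekeeping timer: firmware health checks, stats updates,
 * TC flower stats, NTUPLE filter maintenance, and PHY retries are all
 * kicked off here by scheduling the sp workqueue, then the timer
 * re-arms itself.
 */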
10664 static void bnxt_timer(struct timer_list *t)
10665 {
10666         struct bnxt *bp = from_timer(bp, t, timer);
10667         struct net_device *dev = bp->dev;
10668
10669         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
10670                 return;
10671
10672         if (atomic_read(&bp->intr_sem) != 0)
10673                 goto bnxt_restart_timer;
10674
10675         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10676                 bnxt_fw_health_check(bp);
10677
10678         if (bp->link_info.link_up && bp->stats_coal_ticks) {
10679                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10680                 bnxt_queue_sp_work(bp);
10681         }
10682
10683         if (bnxt_tc_flower_enabled(bp)) {
10684                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10685                 bnxt_queue_sp_work(bp);
10686         }
10687
10688 #ifdef CONFIG_RFS_ACCEL
10689         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10690                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10691                 bnxt_queue_sp_work(bp);
10692         }
10693 #endif /* CONFIG_RFS_ACCEL */
10694
10695         if (bp->link_info.phy_retry) {
10696                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10697                         bp->link_info.phy_retry = false;
10698                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10699                 } else {
10700                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10701                         bnxt_queue_sp_work(bp);
10702                 }
10703         }
10704
10705         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10706             netif_carrier_ok(dev)) {
10707                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10708                 bnxt_queue_sp_work(bp);
10709         }
10710 bnxt_restart_timer:
10711         mod_timer(&bp->timer, jiffies + bp->current_interval);
10712 }
10713
10714 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10715 {
10716         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10717          * set.  If the device is being closed, bnxt_close() may be holding
10718          * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
10719          * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
10720          */
10721         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10722         rtnl_lock();
10723 }
10724
10725 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10726 {
10727         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10728         rtnl_unlock();
10729 }
10730
10731 /* Only called from bnxt_sp_task() */
10732 static void bnxt_reset(struct bnxt *bp, bool silent)
10733 {
10734         bnxt_rtnl_lock_sp(bp);
10735         if (test_bit(BNXT_STATE_OPEN, &bp->state))
10736                 bnxt_reset_task(bp, silent);
10737         bnxt_rtnl_unlock_sp(bp);
10738 }
10739
10740 /* Only called from bnxt_sp_task() */
10741 static void bnxt_rx_ring_reset(struct bnxt *bp)
10742 {
10743         int i;
10744
10745         bnxt_rtnl_lock_sp(bp);
10746         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10747                 bnxt_rtnl_unlock_sp(bp);
10748                 return;
10749         }
10750         /* Disable and flush TPA before resetting the RX ring */
10751         if (bp->flags & BNXT_FLAG_TPA)
10752                 bnxt_set_tpa(bp, false);
10753         for (i = 0; i < bp->rx_nr_rings; i++) {
10754                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
10755                 struct bnxt_cp_ring_info *cpr;
10756                 int rc;
10757
10758                 if (!rxr->bnapi->in_reset)
10759                         continue;
10760
10761                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
10762                 if (rc) {
10763                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
10764                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
10765                         else
10766                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
10767                                             rc);
10768                         bnxt_reset_task(bp, true);
10769                         break;
10770                 }
10771                 bnxt_free_one_rx_ring_skbs(bp, i);
10772                 rxr->rx_prod = 0;
10773                 rxr->rx_agg_prod = 0;
10774                 rxr->rx_sw_agg_prod = 0;
10775                 rxr->rx_next_cons = 0;
10776                 rxr->bnapi->in_reset = false;
10777                 bnxt_alloc_one_rx_ring(bp, i);
10778                 cpr = &rxr->bnapi->cp_ring;
10779                 cpr->sw_stats.rx.rx_resets++;
10780                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10781                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10782                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10783         }
10784         if (bp->flags & BNXT_FLAG_TPA)
10785                 bnxt_set_tpa(bp, true);
10786         bnxt_rtnl_unlock_sp(bp);
10787 }
10788
10789 static void bnxt_fw_reset_close(struct bnxt *bp)
10790 {
10791         bnxt_ulp_stop(bp);
10792         /* When firmware is in a fatal state, disable the PCI device to
10793          * prevent any potential bad DMAs before freeing kernel memory.
10794          */
10795         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10796                 pci_disable_device(bp->pdev);
10797         __bnxt_close_nic(bp, true, false);
10798         bnxt_clear_int_mode(bp);
10799         bnxt_hwrm_func_drv_unrgtr(bp);
10800         if (pci_is_enabled(bp->pdev))
10801                 pci_disable_device(bp->pdev);
10802         bnxt_free_ctx_mem(bp);
10803         kfree(bp->ctx);
10804         bp->ctx = NULL;
10805 }
10806
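/* Firmware is considered recovered when its heartbeat is advancing again
 * and the reset counter shows it has gone through a reset.
 */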
10807 static bool is_bnxt_fw_ok(struct bnxt *bp)
10808 {
10809         struct bnxt_fw_health *fw_health = bp->fw_health;
10810         bool no_heartbeat = false, has_reset = false;
10811         u32 val;
10812
10813         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10814         if (val == fw_health->last_fw_heartbeat)
10815                 no_heartbeat = true;
10816
10817         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10818         if (val != fw_health->last_fw_reset_cnt)
10819                 has_reset = true;
10820
10821         if (!no_heartbeat && has_reset)
10822                 return true;
10823
10824         return false;
10825 }
10826
10827 /* rtnl_lock is acquired before calling this function */
10828 static void bnxt_force_fw_reset(struct bnxt *bp)
10829 {
10830         struct bnxt_fw_health *fw_health = bp->fw_health;
10831         u32 wait_dsecs;
10832
10833         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10834             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10835                 return;
10836
10837         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10838         bnxt_fw_reset_close(bp);
10839         wait_dsecs = fw_health->master_func_wait_dsecs;
10840         if (fw_health->master) {
10841                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10842                         wait_dsecs = 0;
10843                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10844         } else {
10845                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10846                 wait_dsecs = fw_health->normal_func_wait_dsecs;
10847                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10848         }
10849
10850         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10851         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10852         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10853 }
10854
10855 void bnxt_fw_exception(struct bnxt *bp)
10856 {
10857         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10858         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10859         bnxt_rtnl_lock_sp(bp);
10860         bnxt_force_fw_reset(bp);
10861         bnxt_rtnl_unlock_sp(bp);
10862 }
10863
10864 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10865  * < 0 on error.
10866  */
10867 static int bnxt_get_registered_vfs(struct bnxt *bp)
10868 {
10869 #ifdef CONFIG_BNXT_SRIOV
10870         int rc;
10871
10872         if (!BNXT_PF(bp))
10873                 return 0;
10874
10875         rc = bnxt_hwrm_func_qcfg(bp);
10876         if (rc) {
10877                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10878                 return rc;
10879         }
10880         if (bp->pf.registered_vfs)
10881                 return bp->pf.registered_vfs;
10882         if (bp->sriov_cfg)
10883                 return 1;
10884 #endif
10885         return 0;
10886 }
10887
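/* Handle a non-fatal firmware reset request.  If VFs are still
 * registered, poll for them to unregister first; otherwise close the NIC
 * and hand control to the firmware reset state machine via the
 * workqueue.
 */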
10888 void bnxt_fw_reset(struct bnxt *bp)
10889 {
10890         bnxt_rtnl_lock_sp(bp);
10891         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10892             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10893                 int n = 0, tmo;
10894
10895                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10896                 if (bp->pf.active_vfs &&
10897                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10898                         n = bnxt_get_registered_vfs(bp);
10899                 if (n < 0) {
10900                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10901                                    n);
10902                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10903                         dev_close(bp->dev);
10904                         goto fw_reset_exit;
10905                 } else if (n > 0) {
10906                         u16 vf_tmo_dsecs = n * 10;
10907
10908                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10909                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10910                         bp->fw_reset_state =
10911                                 BNXT_FW_RESET_STATE_POLL_VF;
10912                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10913                         goto fw_reset_exit;
10914                 }
10915                 bnxt_fw_reset_close(bp);
10916                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10917                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10918                         tmo = HZ / 10;
10919                 } else {
10920                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10921                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
10922                 }
10923                 bnxt_queue_fw_reset_work(bp, tmo);
10924         }
10925 fw_reset_exit:
10926         bnxt_rtnl_unlock_sp(bp);
10927 }
10928
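/* P5 chips only: detect completion rings that have pending work but no
 * new interrupt.  If a ring's consumer index is unchanged since the
 * last check while work is pending, fetch the ring state via HWRM for
 * debugging and bump the missed_irqs counter.
 */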
10929 static void bnxt_chk_missed_irq(struct bnxt *bp)
10930 {
10931         int i;
10932
10933         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10934                 return;
10935
10936         for (i = 0; i < bp->cp_nr_rings; i++) {
10937                 struct bnxt_napi *bnapi = bp->bnapi[i];
10938                 struct bnxt_cp_ring_info *cpr;
10939                 u32 fw_ring_id;
10940                 int j;
10941
10942                 if (!bnapi)
10943                         continue;
10944
10945                 cpr = &bnapi->cp_ring;
10946                 for (j = 0; j < 2; j++) {
10947                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10948                         u32 val[2];
10949
10950                         if (!cpr2 || cpr2->has_more_work ||
10951                             !bnxt_has_work(bp, cpr2))
10952                                 continue;
10953
10954                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10955                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10956                                 continue;
10957                         }
10958                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10959                         bnxt_dbg_hwrm_ring_info_get(bp,
10960                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10961                                 fw_ring_id, &val[0], &val[1]);
10962                         cpr->sw_stats.cmn.missed_irqs++;
10963                 }
10964         }
10965 }
10966
10967 static void bnxt_cfg_ntp_filters(struct bnxt *);
10968
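/* Derive the ethtool link settings (advertised speeds, duplex, flow
 * control) from the auto/forced PHY configuration in bp->link_info.
 */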
10969 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
10970 {
10971         struct bnxt_link_info *link_info = &bp->link_info;
10972
10973         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10974                 link_info->autoneg = BNXT_AUTONEG_SPEED;
10975                 if (bp->hwrm_spec_code >= 0x10201) {
10976                         if (link_info->auto_pause_setting &
10977                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10978                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10979                 } else {
10980                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10981                 }
10982                 link_info->advertising = link_info->auto_link_speeds;
10983                 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
10984         } else {
10985                 link_info->req_link_speed = link_info->force_link_speed;
10986                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
10987                 if (link_info->force_pam4_link_speed) {
10988                         link_info->req_link_speed =
10989                                 link_info->force_pam4_link_speed;
10990                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
10991                 }
10992                 link_info->req_duplex = link_info->duplex_setting;
10993         }
10994         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10995                 link_info->req_flow_ctrl =
10996                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10997         else
10998                 link_info->req_flow_ctrl = link_info->force_pause_setting;
10999 }
11000
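/* Slow path task: runs deferred work (rx mode updates, ntuple filters,
 * stats, link changes, resets) signaled via bp->sp_event bits, with
 * BNXT_STATE_IN_SP_TASK set for the duration.
 */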
11001 static void bnxt_sp_task(struct work_struct *work)
11002 {
11003         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11004
11005         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11006         smp_mb__after_atomic();
11007         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11008                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11009                 return;
11010         }
11011
11012         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11013                 bnxt_cfg_rx_mode(bp);
11014
11015         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11016                 bnxt_cfg_ntp_filters(bp);
11017         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11018                 bnxt_hwrm_exec_fwd_req(bp);
11019         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11020                 bnxt_hwrm_port_qstats(bp, 0);
11021                 bnxt_hwrm_port_qstats_ext(bp, 0);
11022                 bnxt_accumulate_all_stats(bp);
11023         }
11024
11025         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11026                 int rc;
11027
11028                 mutex_lock(&bp->link_lock);
11029                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11030                                        &bp->sp_event))
11031                         bnxt_hwrm_phy_qcaps(bp);
11032
11033                 rc = bnxt_update_link(bp, true);
11034                 if (rc)
11035                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11036                                    rc);
11037
11038                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11039                                        &bp->sp_event))
11040                         bnxt_init_ethtool_link_settings(bp);
11041                 mutex_unlock(&bp->link_lock);
11042         }
11043         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11044                 int rc;
11045
11046                 mutex_lock(&bp->link_lock);
11047                 rc = bnxt_update_phy_setting(bp);
11048                 mutex_unlock(&bp->link_lock);
11049                 if (rc) {
11050                         netdev_warn(bp->dev, "update phy settings retry failed\n");
11051                 } else {
11052                         bp->link_info.phy_retry = false;
11053                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
11054                 }
11055         }
11056         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11057                 mutex_lock(&bp->link_lock);
11058                 bnxt_get_port_module_status(bp);
11059                 mutex_unlock(&bp->link_lock);
11060         }
11061
11062         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11063                 bnxt_tc_flow_stats_work(bp);
11064
11065         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11066                 bnxt_chk_missed_irq(bp);
11067
11068         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
11069          * must be the last functions to be called before exiting.
11070          */
11071         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11072                 bnxt_reset(bp, false);
11073
11074         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11075                 bnxt_reset(bp, true);
11076
11077         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11078                 bnxt_rx_ring_reset(bp);
11079
11080         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11081                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11082
11083         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11084                 if (!is_bnxt_fw_ok(bp))
11085                         bnxt_devlink_health_report(bp,
11086                                                    BNXT_FW_EXCEPTION_SP_EVENT);
11087         }
11088
11089         smp_mb__before_atomic();
11090         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11091 }
11092
11093 /* Under rtnl_lock */
11094 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11095                      int tx_xdp)
11096 {
11097         int max_rx, max_tx, tx_sets = 1;
11098         int tx_rings_needed, stats;
11099         int rx_rings = rx;
11100         int cp, vnics, rc;
11101
11102         if (tcs)
11103                 tx_sets = tcs;
11104
11105         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11106         if (rc)
11107                 return rc;
11108
11109         if (max_rx < rx)
11110                 return -ENOMEM;
11111
11112         tx_rings_needed = tx * tx_sets + tx_xdp;
11113         if (max_tx < tx_rings_needed)
11114                 return -ENOMEM;
11115
11116         vnics = 1;
11117         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11118                 vnics += rx_rings;
11119
11120         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11121                 rx_rings <<= 1;
11122         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11123         stats = cp;
11124         if (BNXT_NEW_RM(bp)) {
11125                 cp += bnxt_get_ulp_msix_num(bp);
11126                 stats += bnxt_get_ulp_stat_ctxs(bp);
11127         }
11128         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11129                                      stats, vnics);
11130 }
11131
11132 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11133 {
11134         if (bp->bar2) {
11135                 pci_iounmap(pdev, bp->bar2);
11136                 bp->bar2 = NULL;
11137         }
11138
11139         if (bp->bar1) {
11140                 pci_iounmap(pdev, bp->bar1);
11141                 bp->bar1 = NULL;
11142         }
11143
11144         if (bp->bar0) {
11145                 pci_iounmap(pdev, bp->bar0);
11146                 bp->bar0 = NULL;
11147         }
11148 }
11149
11150 static void bnxt_cleanup_pci(struct bnxt *bp)
11151 {
11152         bnxt_unmap_bars(bp, bp->pdev);
11153         pci_release_regions(bp->pdev);
11154         if (pci_is_enabled(bp->pdev))
11155                 pci_disable_device(bp->pdev);
11156 }
11157
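/* Set the default interrupt coalescing parameters for RX and TX. */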
11158 static void bnxt_init_dflt_coal(struct bnxt *bp)
11159 {
11160         struct bnxt_coal *coal;
11161
11162         /* Tick values in microseconds.
11163          * 1 coal_buf x bufs_per_record = 1 completion record.
11164          */
11165         coal = &bp->rx_coal;
11166         coal->coal_ticks = 10;
11167         coal->coal_bufs = 30;
11168         coal->coal_ticks_irq = 1;
11169         coal->coal_bufs_irq = 2;
11170         coal->idle_thresh = 50;
11171         coal->bufs_per_record = 2;
11172         coal->budget = 64;              /* NAPI budget */
11173
11174         coal = &bp->tx_coal;
11175         coal->coal_ticks = 28;
11176         coal->coal_bufs = 30;
11177         coal->coal_ticks_irq = 2;
11178         coal->coal_bufs_irq = 2;
11179         coal->bufs_per_record = 1;
11180
11181         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11182 }
11183
11184 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11185 {
11186 #ifdef CONFIG_TEE_BNXT_FW
11187         int rc = tee_bnxt_fw_load();
11188
11189         if (rc)
11190                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11191
11192         return rc;
11193 #else
11194         netdev_err(bp->dev, "OP-TEE not supported\n");
11195         return -ENODEV;
11196 #endif
11197 }
11198
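/* Firmware init phase 1: get the firmware version, attempt recovery
 * via OP-TEE if the firmware has crashed, set up the HWRM command
 * channel(s), and reset the function.
 */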
11199 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11200 {
11201         int rc;
11202
11203         bp->fw_cap = 0;
11204         rc = bnxt_hwrm_ver_get(bp);
11205         bnxt_try_map_fw_health_reg(bp);
11206         if (rc) {
11207                 if (bp->fw_health && bp->fw_health->status_reliable) {
11208                         u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11209
11210                         netdev_err(bp->dev,
11211                                    "Firmware not responding, status: 0x%x\n",
11212                                    sts);
11213                         if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
11214                                 netdev_warn(bp->dev, "Firmware recovery via OP-TEE requested\n");
11215                                 rc = bnxt_fw_reset_via_optee(bp);
11216                                 if (!rc)
11217                                         rc = bnxt_hwrm_ver_get(bp);
11218                         }
11219                 }
11220                 if (rc)
11221                         return rc;
11222         }
11223
11224         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11225                 rc = bnxt_alloc_kong_hwrm_resources(bp);
11226                 if (rc)
11227                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11228         }
11229
11230         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11231             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11232                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11233                 if (rc)
11234                         return rc;
11235         }
11236         bnxt_nvm_cfg_ver_get(bp);
11237
11238         rc = bnxt_hwrm_func_reset(bp);
11239         if (rc)
11240                 return -ENODEV;
11241
11242         bnxt_hwrm_fw_set_time(bp);
11243         return 0;
11244 }
11245
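/* Firmware init phase 2: query function, flow management and error
 * recovery capabilities, then register the driver with the firmware.
 */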
11246 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11247 {
11248         int rc;
11249
11250         /* Get the MAX capabilities for this function */
11251         rc = bnxt_hwrm_func_qcaps(bp);
11252         if (rc) {
11253                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11254                            rc);
11255                 return -ENODEV;
11256         }
11257
11258         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11259         if (rc)
11260                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11261                             rc);
11262
11263         if (bnxt_alloc_fw_health(bp)) {
11264                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11265         } else {
11266                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11267                 if (rc)
11268                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11269                                     rc);
11270         }
11271
11272         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11273         if (rc)
11274                 return -ENODEV;
11275
11276         bnxt_hwrm_func_qcfg(bp);
11277         bnxt_hwrm_vnic_qcaps(bp);
11278         bnxt_hwrm_port_led_qcaps(bp);
11279         bnxt_ethtool_init(bp);
11280         bnxt_dcb_init(bp);
11281         return 0;
11282 }
11283
11284 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11285 {
11286         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11287         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11288                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11289                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11290                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11291         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11292                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11293                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11294                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11295         }
11296 }
11297
11298 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11299 {
11300         struct net_device *dev = bp->dev;
11301
11302         dev->hw_features &= ~NETIF_F_NTUPLE;
11303         dev->features &= ~NETIF_F_NTUPLE;
11304         bp->flags &= ~BNXT_FLAG_RFS;
11305         if (bnxt_rfs_supported(bp)) {
11306                 dev->hw_features |= NETIF_F_NTUPLE;
11307                 if (bnxt_rfs_capable(bp)) {
11308                         bp->flags |= BNXT_FLAG_RFS;
11309                         dev->features |= NETIF_F_NTUPLE;
11310                 }
11311         }
11312 }
11313
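/* Firmware init phase 3: set default RSS/RFS features, Wake-on-LAN,
 * cache line size and coalescing capabilities.
 */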
11314 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11315 {
11316         struct pci_dev *pdev = bp->pdev;
11317
11318         bnxt_set_dflt_rss_hash_type(bp);
11319         bnxt_set_dflt_rfs(bp);
11320
11321         bnxt_get_wol_settings(bp);
11322         if (bp->flags & BNXT_FLAG_WOL_CAP)
11323                 device_set_wakeup_enable(&pdev->dev, bp->wol);
11324         else
11325                 device_set_wakeup_capable(&pdev->dev, false);
11326
11327         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11328         bnxt_hwrm_coal_params_qcaps(bp);
11329 }
11330
11331 static int bnxt_fw_init_one(struct bnxt *bp)
11332 {
11333         int rc;
11334
11335         rc = bnxt_fw_init_one_p1(bp);
11336         if (rc) {
11337                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11338                 return rc;
11339         }
11340         rc = bnxt_fw_init_one_p2(bp);
11341         if (rc) {
11342                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11343                 return rc;
11344         }
11345         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11346         if (rc)
11347                 return rc;
11348
11349         /* In case fw capabilities have changed, destroy the unneeded
11350          * reporters and create newly capable ones.
11351          */
11352         bnxt_dl_fw_reporters_destroy(bp, false);
11353         bnxt_dl_fw_reporters_create(bp);
11354         bnxt_fw_init_one_p3(bp);
11355         return 0;
11356 }
11357
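/* Execute one step of the firmware reset sequence: write the step's
 * value to the register encoded in fw_reset_seq_regs[] (config space,
 * GRC window, BAR0 or BAR1), then apply the optional per-step delay.
 */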
11358 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11359 {
11360         struct bnxt_fw_health *fw_health = bp->fw_health;
11361         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11362         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11363         u32 reg_type, reg_off, delay_msecs;
11364
11365         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11366         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11367         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11368         switch (reg_type) {
11369         case BNXT_FW_HEALTH_REG_TYPE_CFG:
11370                 pci_write_config_dword(bp->pdev, reg_off, val);
11371                 break;
11372         case BNXT_FW_HEALTH_REG_TYPE_GRC:
11373                 writel(reg_off & BNXT_GRC_BASE_MASK,
11374                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11375                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11376                 fallthrough;
11377         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11378                 writel(val, bp->bar0 + reg_off);
11379                 break;
11380         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11381                 writel(val, bp->bar1 + reg_off);
11382                 break;
11383         }
11384         if (delay_msecs) {
11385                 pci_read_config_dword(bp->pdev, 0, &val);
11386                 msleep(delay_msecs);
11387         }
11388 }
11389
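/* Reset the firmware using whichever method the error recovery
 * capabilities advertise: OP-TEE reload, a host-driven register write
 * sequence, or an HWRM_FW_RESET request to the embedded processor.
 */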
11390 static void bnxt_reset_all(struct bnxt *bp)
11391 {
11392         struct bnxt_fw_health *fw_health = bp->fw_health;
11393         int i, rc;
11394
11395         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11396                 bnxt_fw_reset_via_optee(bp);
11397                 bp->fw_reset_timestamp = jiffies;
11398                 return;
11399         }
11400
11401         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11402                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11403                         bnxt_fw_reset_writel(bp, i);
11404         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11405                 struct hwrm_fw_reset_input req = {0};
11406
11407                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11408                 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11409                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11410                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11411                 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11412                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11413                 if (rc)
11414                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11415         }
11416         bp->fw_reset_timestamp = jiffies;
11417 }
11418
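/* Firmware reset state machine, run from a delayed work item.  Steps
 * through the BNXT_FW_RESET_STATE_* states: poll for VFs to unregister,
 * wait for firmware shutdown, trigger the reset, re-enable the PCI
 * device, poll for the firmware to come back, then reopen the NIC.
 */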
11419 static void bnxt_fw_reset_task(struct work_struct *work)
11420 {
11421         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11422         int rc;
11423
11424         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11425                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11426                 return;
11427         }
11428
11429         switch (bp->fw_reset_state) {
11430         case BNXT_FW_RESET_STATE_POLL_VF: {
11431                 int n = bnxt_get_registered_vfs(bp);
11432                 int tmo;
11433
11434                 if (n < 0) {
11435                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11436                                    n, jiffies_to_msecs(jiffies -
11437                                    bp->fw_reset_timestamp));
11438                         goto fw_reset_abort;
11439                 } else if (n > 0) {
11440                         if (time_after(jiffies, bp->fw_reset_timestamp +
11441                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
11442                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11443                                 bp->fw_reset_state = 0;
11444                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11445                                            n);
11446                                 return;
11447                         }
11448                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11449                         return;
11450                 }
11451                 bp->fw_reset_timestamp = jiffies;
11452                 rtnl_lock();
11453                 bnxt_fw_reset_close(bp);
11454                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11455                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11456                         tmo = HZ / 10;
11457                 } else {
11458                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11459                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11460                 }
11461                 rtnl_unlock();
11462                 bnxt_queue_fw_reset_work(bp, tmo);
11463                 return;
11464         }
11465         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
11466                 u32 val;
11467
11468                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11469                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
11470                     !time_after(jiffies, bp->fw_reset_timestamp +
11471                     (bp->fw_reset_max_dsecs * HZ / 10))) {
11472                         bnxt_queue_fw_reset_work(bp, HZ / 5);
11473                         return;
11474                 }
11475
11476                 if (!bp->fw_health->master) {
11477                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
11478
11479                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11480                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11481                         return;
11482                 }
11483                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11484         }
11485                 fallthrough;
11486         case BNXT_FW_RESET_STATE_RESET_FW:
11487                 bnxt_reset_all(bp);
11488                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11489                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
11490                 return;
11491         case BNXT_FW_RESET_STATE_ENABLE_DEV:
11492                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11493                         u32 val;
11494
11495                         val = bnxt_fw_health_readl(bp,
11496                                                    BNXT_FW_RESET_INPROG_REG);
11497                         if (val)
11498                                 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
11499                                             val);
11500                 }
11501                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11502                 if (pci_enable_device(bp->pdev)) {
11503                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
11504                         goto fw_reset_abort;
11505                 }
11506                 pci_set_master(bp->pdev);
11507                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
11508                 fallthrough;
11509         case BNXT_FW_RESET_STATE_POLL_FW:
11510                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
11511                 rc = __bnxt_hwrm_ver_get(bp, true);
11512                 if (rc) {
11513                         if (time_after(jiffies, bp->fw_reset_timestamp +
11514                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
11515                                 netdev_err(bp->dev, "Firmware reset aborted\n");
11516                                 goto fw_reset_abort_status;
11517                         }
11518                         bnxt_queue_fw_reset_work(bp, HZ / 5);
11519                         return;
11520                 }
11521                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
11522                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
11523                 fallthrough;
11524         case BNXT_FW_RESET_STATE_OPENING:
11525                 while (!rtnl_trylock()) {
11526                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11527                         return;
11528                 }
11529                 rc = bnxt_open(bp->dev);
11530                 if (rc) {
11531                         netdev_err(bp->dev, "bnxt_open() failed\n");
11532                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11533                         dev_close(bp->dev);
11534                 }
11535
11536                 bp->fw_reset_state = 0;
11537                 /* Make sure fw_reset_state is 0 before clearing the flag */
11538                 smp_mb__before_atomic();
11539                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11540                 bnxt_ulp_start(bp, rc);
11541                 if (!rc)
11542                         bnxt_reenable_sriov(bp);
11543                 bnxt_dl_health_recovery_done(bp);
11544                 bnxt_dl_health_status_update(bp, true);
11545                 rtnl_unlock();
11546                 break;
11547         }
11548         return;
11549
11550 fw_reset_abort_status:
11551         if (bp->fw_health->status_reliable ||
11552             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
11553                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11554
11555                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
11556         }
11557 fw_reset_abort:
11558         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11559         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
11560                 bnxt_dl_health_status_update(bp, false);
11561         bp->fw_reset_state = 0;
11562         rtnl_lock();
11563         dev_close(bp->dev);
11564         rtnl_unlock();
11565 }
11566
11567 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
11568 {
11569         int rc;
11570         struct bnxt *bp = netdev_priv(dev);
11571
11572         SET_NETDEV_DEV(dev, &pdev->dev);
11573
11574         /* enable device (incl. PCI PM wakeup), and bus-mastering */
11575         rc = pci_enable_device(pdev);
11576         if (rc) {
11577                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
11578                 goto init_err;
11579         }
11580
11581         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11582                 dev_err(&pdev->dev,
11583                         "Cannot find PCI device base address, aborting\n");
11584                 rc = -ENODEV;
11585                 goto init_err_disable;
11586         }
11587
11588         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11589         if (rc) {
11590                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
11591                 goto init_err_disable;
11592         }
11593
11594         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
11595             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
11596                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                rc = -EIO;
11597                 goto init_err_disable;
11598         }
11599
11600         pci_set_master(pdev);
11601
11602         bp->dev = dev;
11603         bp->pdev = pdev;
11604
11605         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
11606          * determines the BAR size.
11607          */
11608         bp->bar0 = pci_ioremap_bar(pdev, 0);
11609         if (!bp->bar0) {
11610                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
11611                 rc = -ENOMEM;
11612                 goto init_err_release;
11613         }
11614
11615         bp->bar2 = pci_ioremap_bar(pdev, 4);
11616         if (!bp->bar2) {
11617                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
11618                 rc = -ENOMEM;
11619                 goto init_err_release;
11620         }
11621
11622         pci_enable_pcie_error_reporting(pdev);
11623
11624         INIT_WORK(&bp->sp_task, bnxt_sp_task);
11625         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
11626
11627         spin_lock_init(&bp->ntp_fltr_lock);
11628 #if BITS_PER_LONG == 32
11629         spin_lock_init(&bp->db_lock);
11630 #endif
11631
11632         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
11633         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
11634
11635         bnxt_init_dflt_coal(bp);
11636
11637         timer_setup(&bp->timer, bnxt_timer, 0);
11638         bp->current_interval = BNXT_TIMER_INTERVAL;
11639
11640         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
11641         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
11642
11643         clear_bit(BNXT_STATE_OPEN, &bp->state);
11644         return 0;
11645
11646 init_err_release:
11647         bnxt_unmap_bars(bp, pdev);
11648         pci_release_regions(pdev);
11649
11650 init_err_disable:
11651         pci_disable_device(pdev);
11652
11653 init_err:
11654         return rc;
11655 }
11656
11657 /* rtnl_lock held */
11658 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
11659 {
11660         struct sockaddr *addr = p;
11661         struct bnxt *bp = netdev_priv(dev);
11662         int rc = 0;
11663
11664         if (!is_valid_ether_addr(addr->sa_data))
11665                 return -EADDRNOTAVAIL;
11666
11667         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
11668                 return 0;
11669
11670         rc = bnxt_approve_mac(bp, addr->sa_data, true);
11671         if (rc)
11672                 return rc;
11673
11674         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11675         if (netif_running(dev)) {
11676                 bnxt_close_nic(bp, false, false);
11677                 rc = bnxt_open_nic(bp, false, false);
11678         }
11679
11680         return rc;
11681 }
11682
11683 /* rtnl_lock held */
11684 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
11685 {
11686         struct bnxt *bp = netdev_priv(dev);
11687
11688         if (netif_running(dev))
11689                 bnxt_close_nic(bp, true, false);
11690
11691         dev->mtu = new_mtu;
11692         bnxt_set_ring_params(bp);
11693
11694         if (netif_running(dev))
11695                 return bnxt_open_nic(bp, true, false);
11696
11697         return 0;
11698 }
11699
11700 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
11701 {
11702         struct bnxt *bp = netdev_priv(dev);
11703         bool sh = false;
11704         int rc;
11705
11706         if (tc > bp->max_tc) {
11707                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
11708                            tc, bp->max_tc);
11709                 return -EINVAL;
11710         }
11711
11712         if (netdev_get_num_tc(dev) == tc)
11713                 return 0;
11714
11715         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11716                 sh = true;
11717
11718         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
11719                               sh, tc, bp->tx_nr_rings_xdp);
11720         if (rc)
11721                 return rc;
11722
11723         /* Needs to close the device and do hw resource re-allocations */
11724         if (netif_running(bp->dev))
11725                 bnxt_close_nic(bp, true, false);
11726
11727         if (tc) {
11728                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11729                 netdev_set_num_tc(dev, tc);
11730         } else {
11731                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11732                 netdev_reset_tc(dev);
11733         }
11734         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
11735         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11736                                bp->tx_nr_rings + bp->rx_nr_rings;
11737
11738         if (netif_running(bp->dev))
11739                 return bnxt_open_nic(bp, true, false);
11740
11741         return 0;
11742 }
11743
11744 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
11745                                   void *cb_priv)
11746 {
11747         struct bnxt *bp = cb_priv;
11748
11749         if (!bnxt_tc_flower_enabled(bp) ||
11750             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
11751                 return -EOPNOTSUPP;
11752
11753         switch (type) {
11754         case TC_SETUP_CLSFLOWER:
11755                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11756         default:
11757                 return -EOPNOTSUPP;
11758         }
11759 }
11760
11761 LIST_HEAD(bnxt_block_cb_list);
11762
11763 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
11764                          void *type_data)
11765 {
11766         struct bnxt *bp = netdev_priv(dev);
11767
11768         switch (type) {
11769         case TC_SETUP_BLOCK:
11770                 return flow_block_cb_setup_simple(type_data,
11771                                                   &bnxt_block_cb_list,
11772                                                   bnxt_setup_tc_block_cb,
11773                                                   bp, bp, true);
11774         case TC_SETUP_QDISC_MQPRIO: {
11775                 struct tc_mqprio_qopt *mqprio = type_data;
11776
11777                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
11778
11779                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
11780         }
11781         default:
11782                 return -EOPNOTSUPP;
11783         }
11784 }
11785
11786 #ifdef CONFIG_RFS_ACCEL
11787 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
11788                             struct bnxt_ntuple_filter *f2)
11789 {
11790         struct flow_keys *keys1 = &f1->fkeys;
11791         struct flow_keys *keys2 = &f2->fkeys;
11792
11793         if (keys1->basic.n_proto != keys2->basic.n_proto ||
11794             keys1->basic.ip_proto != keys2->basic.ip_proto)
11795                 return false;
11796
11797         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
11798                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
11799                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
11800                         return false;
11801         } else {
11802                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
11803                            sizeof(keys1->addrs.v6addrs.src)) ||
11804                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
11805                            sizeof(keys1->addrs.v6addrs.dst)))
11806                         return false;
11807         }
11808
11809         if (keys1->ports.ports == keys2->ports.ports &&
11810             keys1->control.flags == keys2->control.flags &&
11811             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11812             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
11813                 return true;
11814
11815         return false;
11816 }
11817
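/* aRFS flow steering callback.  Dissect the skb, reject unsupported
 * protocols, and if no matching ntuple filter already exists, allocate
 * one and kick the slow path task to program it into hardware.
 */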
11818 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11819                               u16 rxq_index, u32 flow_id)
11820 {
11821         struct bnxt *bp = netdev_priv(dev);
11822         struct bnxt_ntuple_filter *fltr, *new_fltr;
11823         struct flow_keys *fkeys;
11824         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11825         int rc = 0, idx, bit_id, l2_idx = 0;
11826         struct hlist_head *head;
11827         u32 flags;
11828
11829         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11830                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11831                 int off = 0, j;
11832
11833                 netif_addr_lock_bh(dev);
11834                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11835                         if (ether_addr_equal(eth->h_dest,
11836                                              vnic->uc_list + off)) {
11837                                 l2_idx = j + 1;
11838                                 break;
11839                         }
11840                 }
11841                 netif_addr_unlock_bh(dev);
11842                 if (!l2_idx)
11843                         return -EINVAL;
11844         }
11845         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11846         if (!new_fltr)
11847                 return -ENOMEM;
11848
11849         fkeys = &new_fltr->fkeys;
11850         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11851                 rc = -EPROTONOSUPPORT;
11852                 goto err_free;
11853         }
11854
11855         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11856              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
11857             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11858              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11859                 rc = -EPROTONOSUPPORT;
11860                 goto err_free;
11861         }
11862         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11863             bp->hwrm_spec_code < 0x10601) {
11864                 rc = -EPROTONOSUPPORT;
11865                 goto err_free;
11866         }
11867         flags = fkeys->control.flags;
11868         if (((flags & FLOW_DIS_ENCAPSULATION) &&
11869              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
11870                 rc = -EPROTONOSUPPORT;
11871                 goto err_free;
11872         }
11873
11874         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
11875         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11876
11877         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11878         head = &bp->ntp_fltr_hash_tbl[idx];
11879         rcu_read_lock();
11880         hlist_for_each_entry_rcu(fltr, head, hash) {
11881                 if (bnxt_fltr_match(fltr, new_fltr)) {
11882                         rcu_read_unlock();
11883                         rc = 0;
11884                         goto err_free;
11885                 }
11886         }
11887         rcu_read_unlock();
11888
11889         spin_lock_bh(&bp->ntp_fltr_lock);
11890         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11891                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
11892         if (bit_id < 0) {
11893                 spin_unlock_bh(&bp->ntp_fltr_lock);
11894                 rc = -ENOMEM;
11895                 goto err_free;
11896         }
11897
11898         new_fltr->sw_id = (u16)bit_id;
11899         new_fltr->flow_id = flow_id;
11900         new_fltr->l2_fltr_idx = l2_idx;
11901         new_fltr->rxq = rxq_index;
11902         hlist_add_head_rcu(&new_fltr->hash, head);
11903         bp->ntp_fltr_count++;
11904         spin_unlock_bh(&bp->ntp_fltr_lock);
11905
11906         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11907         bnxt_queue_sp_work(bp);
11908
11909         return new_fltr->sw_id;
11910
11911 err_free:
11912         kfree(new_fltr);
11913         return rc;
11914 }
11915
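/* Walk the ntuple filter hash table: program new filters into hardware
 * and free filters that RPS says may expire.
 */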
11916 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11917 {
11918         int i;
11919
11920         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11921                 struct hlist_head *head;
11922                 struct hlist_node *tmp;
11923                 struct bnxt_ntuple_filter *fltr;
11924                 int rc;
11925
11926                 head = &bp->ntp_fltr_hash_tbl[i];
11927                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11928                         bool del = false;
11929
11930                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11931                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11932                                                         fltr->flow_id,
11933                                                         fltr->sw_id)) {
11934                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
11935                                                                          fltr);
11936                                         del = true;
11937                                 }
11938                         } else {
11939                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11940                                                                        fltr);
11941                                 if (rc)
11942                                         del = true;
11943                                 else
11944                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
11945                         }
11946
11947                         if (del) {
11948                                 spin_lock_bh(&bp->ntp_fltr_lock);
11949                                 hlist_del_rcu(&fltr->hash);
11950                                 bp->ntp_fltr_count--;
11951                                 spin_unlock_bh(&bp->ntp_fltr_lock);
11952                                 synchronize_rcu();
11953                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11954                                 kfree(fltr);
11955                         }
11956                 }
11957         }
11958         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11959                 netdev_info(bp->dev, "Received PF driver unload event!\n");
11960 }
11961
11962 #else
11963
11964 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11965 {
11966 }
11967
11968 #endif /* CONFIG_RFS_ACCEL */
11969
11970 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
11971 {
11972         struct bnxt *bp = netdev_priv(netdev);
11973         struct udp_tunnel_info ti;
11974         unsigned int cmd;
11975
11976         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
11977         if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
11978                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
11979         else
11980                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
11981
11982         if (ti.port)
11983                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
11984
11985         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
11986 }
11987
11988 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
11989         .sync_table     = bnxt_udp_tunnel_sync,
11990         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
11991                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
11992         .tables         = {
11993                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
11994                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
11995         },
11996 };
11997
11998 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11999                                struct net_device *dev, u32 filter_mask,
12000                                int nlflags)
12001 {
12002         struct bnxt *bp = netdev_priv(dev);
12003
12004         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12005                                        nlflags, filter_mask, NULL);
12006 }
12007
12008 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12009                                u16 flags, struct netlink_ext_ack *extack)
12010 {
12011         struct bnxt *bp = netdev_priv(dev);
12012         struct nlattr *attr, *br_spec;
12013         int rem, rc = 0;
12014
12015         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12016                 return -EOPNOTSUPP;
12017
12018         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12019         if (!br_spec)
12020                 return -EINVAL;
12021
12022         nla_for_each_nested(attr, br_spec, rem) {
12023                 u16 mode;
12024
12025                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12026                         continue;
12027
12028                 if (nla_len(attr) < sizeof(mode))
12029                         return -EINVAL;
12030
12031                 mode = nla_get_u16(attr);
12032                 if (mode == bp->br_mode)
12033                         break;
12034
12035                 rc = bnxt_hwrm_set_br_mode(bp, mode);
12036                 if (!rc)
12037                         bp->br_mode = mode;
12038                 break;
12039         }
12040         return rc;
12041 }
12042
12043 int bnxt_get_port_parent_id(struct net_device *dev,
12044                             struct netdev_phys_item_id *ppid)
12045 {
12046         struct bnxt *bp = netdev_priv(dev);
12047
12048         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12049                 return -EOPNOTSUPP;
12050
12051         /* The PF and its VF-reps only support the switchdev framework */
12052         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12053                 return -EOPNOTSUPP;
12054
12055         ppid->id_len = sizeof(bp->dsn);
12056         memcpy(ppid->id, bp->dsn, ppid->id_len);
12057
12058         return 0;
12059 }
12060
12061 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12062 {
12063         struct bnxt *bp = netdev_priv(dev);
12064
12065         return &bp->dl_port;
12066 }
12067
12068 static const struct net_device_ops bnxt_netdev_ops = {
12069         .ndo_open               = bnxt_open,
12070         .ndo_start_xmit         = bnxt_start_xmit,
12071         .ndo_stop               = bnxt_close,
12072         .ndo_get_stats64        = bnxt_get_stats64,
12073         .ndo_set_rx_mode        = bnxt_set_rx_mode,
12074         .ndo_do_ioctl           = bnxt_ioctl,
12075         .ndo_validate_addr      = eth_validate_addr,
12076         .ndo_set_mac_address    = bnxt_change_mac_addr,
12077         .ndo_change_mtu         = bnxt_change_mtu,
12078         .ndo_fix_features       = bnxt_fix_features,
12079         .ndo_set_features       = bnxt_set_features,
12080         .ndo_tx_timeout         = bnxt_tx_timeout,
12081 #ifdef CONFIG_BNXT_SRIOV
12082         .ndo_get_vf_config      = bnxt_get_vf_config,
12083         .ndo_set_vf_mac         = bnxt_set_vf_mac,
12084         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
12085         .ndo_set_vf_rate        = bnxt_set_vf_bw,
12086         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
12087         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
12088         .ndo_set_vf_trust       = bnxt_set_vf_trust,
12089 #endif
12090         .ndo_setup_tc           = bnxt_setup_tc,
12091 #ifdef CONFIG_RFS_ACCEL
12092         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
12093 #endif
12094         .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
12095         .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
12096         .ndo_bpf                = bnxt_xdp,
12097         .ndo_xdp_xmit           = bnxt_xdp_xmit,
12098         .ndo_bridge_getlink     = bnxt_bridge_getlink,
12099         .ndo_bridge_setlink     = bnxt_bridge_setlink,
12100         .ndo_get_devlink_port   = bnxt_get_devlink_port,
12101 };
12102
12103 static void bnxt_remove_one(struct pci_dev *pdev)
12104 {
12105         struct net_device *dev = pci_get_drvdata(pdev);
12106         struct bnxt *bp = netdev_priv(dev);
12107
12108         if (BNXT_PF(bp))
12109                 bnxt_sriov_disable(bp);
12110
12111         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12112         bnxt_cancel_sp_work(bp);
12113         bp->sp_event = 0;
12114
12115         bnxt_dl_fw_reporters_destroy(bp, true);
12116         if (BNXT_PF(bp))
12117                 devlink_port_type_clear(&bp->dl_port);
12118         pci_disable_pcie_error_reporting(pdev);
12119         unregister_netdev(dev);
12120         bnxt_dl_unregister(bp);
12121         bnxt_shutdown_tc(bp);
12122
12123         bnxt_clear_int_mode(bp);
12124         bnxt_hwrm_func_drv_unrgtr(bp);
12125         bnxt_free_hwrm_resources(bp);
12126         bnxt_free_hwrm_short_cmd_req(bp);
12127         bnxt_ethtool_free(bp);
12128         bnxt_dcb_free(bp);
12129         kfree(bp->edev);
12130         bp->edev = NULL;
12131         kfree(bp->fw_health);
12132         bp->fw_health = NULL;
12133         bnxt_cleanup_pci(bp);
12134         bnxt_free_ctx_mem(bp);
12135         kfree(bp->ctx);
12136         bp->ctx = NULL;
12137         kfree(bp->rss_indir_tbl);
12138         bp->rss_indir_tbl = NULL;
12139         bnxt_free_port_stats(bp);
12140         free_netdev(dev);
12141 }
12142
12143 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12144 {
12145         int rc = 0;
12146         struct bnxt_link_info *link_info = &bp->link_info;
12147
12148         rc = bnxt_hwrm_phy_qcaps(bp);
12149         if (rc) {
12150                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12151                            rc);
12152                 return rc;
12153         }
12154         if (!fw_dflt)
12155                 return 0;
12156
12157         rc = bnxt_update_link(bp, false);
12158         if (rc) {
12159                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12160                            rc);
12161                 return rc;
12162         }
12163
12164         /* Older firmware does not have supported_auto_speeds, so assume
12165          * that all supported speeds can be autonegotiated.
12166          */
12167         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12168                 link_info->support_auto_speeds = link_info->support_speeds;
12169
12170         bnxt_init_ethtool_link_settings(bp);
12171         return 0;
12172 }
12173
12174 static int bnxt_get_max_irq(struct pci_dev *pdev)
12175 {
12176         u16 ctrl;
12177
12178         if (!pdev->msix_cap)
12179                 return 1;
12180
12181         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12182         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12183 }
12184
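/* Compute the raw maximum RX/TX/completion ring counts from hardware
 * resources, capped by available IRQs and adjusted for ULP usage,
 * aggregation rings and chip-specific quirks.
 */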
12185 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12186                                 int *max_cp)
12187 {
12188         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12189         int max_ring_grps = 0, max_irq;
12190
12191         *max_tx = hw_resc->max_tx_rings;
12192         *max_rx = hw_resc->max_rx_rings;
12193         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12194         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12195                         bnxt_get_ulp_msix_num(bp),
12196                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12197         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12198                 *max_cp = min_t(int, *max_cp, max_irq);
12199         max_ring_grps = hw_resc->max_hw_ring_grps;
12200         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12201                 *max_cp -= 1;
12202                 *max_rx -= 2;
12203         }
12204         if (bp->flags & BNXT_FLAG_AGG_RINGS)
12205                 *max_rx >>= 1;
12206         if (bp->flags & BNXT_FLAG_CHIP_P5) {
12207                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12208                 /* On P5 chips, the max_cp output param should be the number of available NQs */
12209                 *max_cp = max_irq;
12210         }
12211         *max_rx = min_t(int, *max_rx, max_ring_grps);
12212 }
12213
12214 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12215 {
12216         int rx, tx, cp;
12217
12218         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12219         *max_rx = rx;
12220         *max_tx = tx;
12221         if (!rx || !tx || !cp)
12222                 return -ENOMEM;
12223
12224         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12225 }
12226
12227 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12228                                bool shared)
12229 {
12230         int rc;
12231
12232         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12233         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12234                 /* Not enough rings, try disabling agg rings. */
12235                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12236                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12237                 if (rc) {
12238                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
12239                         bp->flags |= BNXT_FLAG_AGG_RINGS;
12240                         return rc;
12241                 }
12242                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12243                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12244                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12245                 bnxt_set_ring_params(bp);
12246         }
12247
12248         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12249                 int max_cp, max_stat, max_irq;
12250
12251                 /* Reserve minimum resources for RoCE */
12252                 max_cp = bnxt_get_max_func_cp_rings(bp);
12253                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12254                 max_irq = bnxt_get_max_func_irqs(bp);
12255                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12256                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12257                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12258                         return 0;
12259
12260                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12261                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12262                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12263                 max_cp = min_t(int, max_cp, max_irq);
12264                 max_cp = min_t(int, max_cp, max_stat);
12265                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12266                 if (rc)
12267                         rc = 0;
12268         }
12269         return rc;
12270 }
12271
12272 /* In initial default shared ring setting, each shared ring must have a
12273  * RX/TX ring pair.
12274  */
12275 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12276 {
12277         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12278         bp->rx_nr_rings = bp->cp_nr_rings;
12279         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12280         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12281 }
12282
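/* Pick and reserve the default ring counts based on the number of
 * online CPUs, the port count and the hardware limits.
 */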
12283 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12284 {
12285         int dflt_rings, max_rx_rings, max_tx_rings, rc;
12286
12287         if (!bnxt_can_reserve_rings(bp))
12288                 return 0;
12289
12290         if (sh)
12291                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12292         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12293         /* Reduce default rings on multi-port cards so that total default
12294          * rings do not exceed CPU count.
12295          */
12296         if (bp->port_count > 1) {
12297                 int max_rings =
12298                         max_t(int, num_online_cpus() / bp->port_count, 1);
12299
12300                 dflt_rings = min_t(int, dflt_rings, max_rings);
12301         }
12302         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12303         if (rc)
12304                 return rc;
12305         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12306         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12307         if (sh)
12308                 bnxt_trim_dflt_sh_rings(bp);
12309         else
12310                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12311         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12312
12313         rc = __bnxt_reserve_rings(bp);
12314         if (rc)
12315                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12316         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12317         if (sh)
12318                 bnxt_trim_dflt_sh_rings(bp);
12319
12320         /* Rings may have been trimmed; re-reserve the trimmed rings. */
12321         if (bnxt_need_reserve_rings(bp)) {
12322                 rc = __bnxt_reserve_rings(bp);
12323                 if (rc)
12324                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12325                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12326         }
12327         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12328                 bp->rx_nr_rings++;
12329                 bp->cp_nr_rings++;
12330         }
12331         if (rc) {
12332                 bp->tx_nr_rings = 0;
12333                 bp->rx_nr_rings = 0;
12334         }
12335         return rc;
12336 }
12337
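/* Configure a default shared ring layout and interrupt mode if no TX
 * rings have been set up yet, and enable RFS/NTUPLE when supported.
 */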
12338 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12339 {
12340         int rc;
12341
12342         if (bp->tx_nr_rings)
12343                 return 0;
12344
12345         bnxt_ulp_irq_stop(bp);
12346         bnxt_clear_int_mode(bp);
12347         rc = bnxt_set_dflt_rings(bp, true);
12348         if (rc) {
12349                 netdev_err(bp->dev, "Not enough rings available.\n");
12350                 goto init_dflt_ring_err;
12351         }
12352         rc = bnxt_init_int_mode(bp);
12353         if (rc)
12354                 goto init_dflt_ring_err;
12355
12356         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12357         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12358                 bp->flags |= BNXT_FLAG_RFS;
12359                 bp->dev->features |= NETIF_F_NTUPLE;
12360         }
12361 init_dflt_ring_err:
12362         bnxt_ulp_irq_restart(bp, rc);
12363         return rc;
12364 }
12365
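/* Re-query function capabilities and rebuild the interrupt mode after
 * function-level firmware resources may have changed (e.g. after
 * SR-IOV reconfiguration), restarting the NIC if it was running.
 */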
12366 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12367 {
12368         int rc;
12369
12370         ASSERT_RTNL();
12371         bnxt_hwrm_func_qcaps(bp);
12372
12373         if (netif_running(bp->dev))
12374                 __bnxt_close_nic(bp, true, false);
12375
12376         bnxt_ulp_irq_stop(bp);
12377         bnxt_clear_int_mode(bp);
12378         rc = bnxt_init_int_mode(bp);
12379         bnxt_ulp_irq_restart(bp, rc);
12380
12381         if (netif_running(bp->dev)) {
12382                 if (rc)
12383                         dev_close(bp->dev);
12384                 else
12385                         rc = bnxt_open_nic(bp, true, false);
12386         }
12387
12388         return rc;
12389 }
12390
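/* Set the initial netdev MAC address.  A PF uses the address assigned
 * by firmware; a VF uses the administratively assigned address if it
 * is valid and falls back to a random address otherwise, then asks the
 * PF/firmware to approve the choice.
 */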
12391 static int bnxt_init_mac_addr(struct bnxt *bp)
12392 {
12393         int rc = 0;
12394
12395         if (BNXT_PF(bp)) {
12396                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
12397         } else {
12398 #ifdef CONFIG_BNXT_SRIOV
12399                 struct bnxt_vf_info *vf = &bp->vf;
12400                 bool strict_approval = true;
12401
12402                 if (is_valid_ether_addr(vf->mac_addr)) {
12403                         /* overwrite netdev dev_addr with admin VF MAC */
12404                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
12405                         /* Older PF driver or firmware may not approve this
12406                          * correctly.
12407                          */
12408                         strict_approval = false;
12409                 } else {
12410                         eth_hw_addr_random(bp->dev);
12411                 }
12412                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
12413 #endif
12414         }
12415         return rc;
12416 }
12417
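/* Read the board part number and serial number from the read-only
 * section of the PCI Vital Product Data.
 */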
12418 #define BNXT_VPD_LEN    512
12419 static void bnxt_vpd_read_info(struct bnxt *bp)
12420 {
12421         struct pci_dev *pdev = bp->pdev;
12422         int i, len, pos, ro_size, size;
12423         ssize_t vpd_size;
12424         u8 *vpd_data;
12425
12426         vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
12427         if (!vpd_data)
12428                 return;
12429
12430         vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
12431         if (vpd_size <= 0) {
12432                 netdev_err(bp->dev, "Unable to read VPD\n");
12433                 goto exit;
12434         }
12435
12436         i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
12437         if (i < 0) {
12438                 netdev_err(bp->dev, "VPD READ-Only not found\n");
12439                 goto exit;
12440         }
12441
12442         ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
12443         i += PCI_VPD_LRDT_TAG_SIZE;
12444         if (i + ro_size > vpd_size)
12445                 goto exit;
12446
12447         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12448                                         PCI_VPD_RO_KEYWORD_PARTNO);
12449         if (pos < 0)
12450                 goto read_sn;
12451
12452         len = pci_vpd_info_field_size(&vpd_data[pos]);
12453         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12454         if (len + pos > vpd_size)
12455                 goto read_sn;
12456
12457         size = min(len, BNXT_VPD_FLD_LEN - 1);
12458         memcpy(bp->board_partno, &vpd_data[pos], size);
12459
12460 read_sn:
12461         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12462                                         PCI_VPD_RO_KEYWORD_SERIALNO);
12463         if (pos < 0)
12464                 goto exit;
12465
12466         len = pci_vpd_info_field_size(&vpd_data[pos]);
12467         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12468         if (len + pos > vpd_size)
12469                 goto exit;
12470
12471         size = min(len, BNXT_VPD_FLD_LEN - 1);
12472         memcpy(bp->board_serialno, &vpd_data[pos], size);
12473 exit:
12474         kfree(vpd_data);
12475 }
12476
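/* Read the PCIe Device Serial Number into @dsn in little-endian byte
 * order.  Returns -EOPNOTSUPP if the adapter does not expose a DSN.
 */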
12477 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
12478 {
12479         struct pci_dev *pdev = bp->pdev;
12480         u64 qword;
12481
12482         qword = pci_get_dsn(pdev);
12483         if (!qword) {
12484                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
12485                 return -EOPNOTSUPP;
12486         }
12487
12488         put_unaligned_le64(qword, dsn);
12489
12490         bp->flags |= BNXT_FLAG_DSN_VALID;
12491         return 0;
12492 }
12493
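/* Map the doorbell BAR (BAR 2) using the doorbell area size reported
 * by firmware.
 */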
12494 static int bnxt_map_db_bar(struct bnxt *bp)
12495 {
12496         if (!bp->db_size)
12497                 return -ENODEV;
12498         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
12499         if (!bp->bar1)
12500                 return -ENOMEM;
12501         return 0;
12502 }
12503
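/* PCI probe entry point: allocate the net_device, initialize the
 * board and firmware interface, set up features and default rings,
 * then register the netdev.
 */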
12504 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
12505 {
12506         struct net_device *dev;
12507         struct bnxt *bp;
12508         int rc, max_irqs;
12509
12510         if (pci_is_bridge(pdev))
12511                 return -ENODEV;
12512
12513         /* Clear any DMA transactions left pending by the crashed kernel
12514          * while loading the driver in the capture (kdump) kernel.
12515          */
12516         if (is_kdump_kernel()) {
12517                 pci_clear_master(pdev);
12518                 pcie_flr(pdev);
12519         }
12520
12521         max_irqs = bnxt_get_max_irq(pdev);
12522         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
12523         if (!dev)
12524                 return -ENOMEM;
12525
12526         bp = netdev_priv(dev);
12527         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
12528         bnxt_set_max_func_irqs(bp, max_irqs);
12529
12530         if (bnxt_vf_pciid(ent->driver_data))
12531                 bp->flags |= BNXT_FLAG_VF;
12532
12533         if (pdev->msix_cap)
12534                 bp->flags |= BNXT_FLAG_MSIX_CAP;
12535
12536         rc = bnxt_init_board(pdev, dev);
12537         if (rc < 0)
12538                 goto init_err_free;
12539
12540         dev->netdev_ops = &bnxt_netdev_ops;
12541         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
12542         dev->ethtool_ops = &bnxt_ethtool_ops;
12543         pci_set_drvdata(pdev, dev);
12544
12545         if (BNXT_PF(bp))
12546                 bnxt_vpd_read_info(bp);
12547
12548         rc = bnxt_alloc_hwrm_resources(bp);
12549         if (rc)
12550                 goto init_err_pci_clean;
12551
12552         mutex_init(&bp->hwrm_cmd_lock);
12553         mutex_init(&bp->link_lock);
12554
12555         rc = bnxt_fw_init_one_p1(bp);
12556         if (rc)
12557                 goto init_err_pci_clean;
12558
12559         if (BNXT_CHIP_P5(bp)) {
12560                 bp->flags |= BNXT_FLAG_CHIP_P5;
12561                 if (BNXT_CHIP_SR2(bp))
12562                         bp->flags |= BNXT_FLAG_CHIP_SR2;
12563         }
12564
12565         rc = bnxt_alloc_rss_indir_tbl(bp);
12566         if (rc)
12567                 goto init_err_pci_clean;
12568
12569         rc = bnxt_fw_init_one_p2(bp);
12570         if (rc)
12571                 goto init_err_pci_clean;
12572
12573         rc = bnxt_map_db_bar(bp);
12574         if (rc) {
12575                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
12576                         rc);
12577                 goto init_err_pci_clean;
12578         }
12579
12580         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12581                            NETIF_F_TSO | NETIF_F_TSO6 |
12582                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12583                            NETIF_F_GSO_IPXIP4 |
12584                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12585                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
12586                            NETIF_F_RXCSUM | NETIF_F_GRO;
12587
12588         if (BNXT_SUPPORTS_TPA(bp))
12589                 dev->hw_features |= NETIF_F_LRO;
12590
12591         dev->hw_enc_features =
12592                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12593                         NETIF_F_TSO | NETIF_F_TSO6 |
12594                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12595                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12596                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
12597         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
12598
12599         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
12600                                     NETIF_F_GSO_GRE_CSUM;
12601         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
12602         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
12603                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12604         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
12605                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
12606         if (BNXT_SUPPORTS_TPA(bp))
12607                 dev->hw_features |= NETIF_F_GRO_HW;
12608         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
12609         if (dev->features & NETIF_F_GRO_HW)
12610                 dev->features &= ~NETIF_F_LRO;
12611         dev->priv_flags |= IFF_UNICAST_FLT;
12612
12613 #ifdef CONFIG_BNXT_SRIOV
12614         init_waitqueue_head(&bp->sriov_cfg_wait);
12615         mutex_init(&bp->sriov_lock);
12616 #endif
12617         if (BNXT_SUPPORTS_TPA(bp)) {
12618                 bp->gro_func = bnxt_gro_func_5730x;
12619                 if (BNXT_CHIP_P4(bp))
12620                         bp->gro_func = bnxt_gro_func_5731x;
12621                 else if (BNXT_CHIP_P5(bp))
12622                         bp->gro_func = bnxt_gro_func_5750x;
12623         }
12624         if (!BNXT_CHIP_P4_PLUS(bp))
12625                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
12626
12627         bp->ulp_probe = bnxt_ulp_probe;
12628
12629         rc = bnxt_init_mac_addr(bp);
12630         if (rc) {
12631                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
12632                 rc = -EADDRNOTAVAIL;
12633                 goto init_err_pci_clean;
12634         }
12635
12636         if (BNXT_PF(bp)) {
12637                 /* Read the adapter's DSN to use as the eswitch switch_id */
12638                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
12639         }
12640
12641         /* MTU range: 60 - FW defined max */
12642         dev->min_mtu = ETH_ZLEN;
12643         dev->max_mtu = bp->max_mtu;
12644
12645         rc = bnxt_probe_phy(bp, true);
12646         if (rc)
12647                 goto init_err_pci_clean;
12648
12649         bnxt_set_rx_skb_mode(bp, false);
12650         bnxt_set_tpa_flags(bp);
12651         bnxt_set_ring_params(bp);
12652         rc = bnxt_set_dflt_rings(bp, true);
12653         if (rc) {
12654                 netdev_err(bp->dev, "Not enough rings available.\n");
12655                 rc = -ENOMEM;
12656                 goto init_err_pci_clean;
12657         }
12658
12659         bnxt_fw_init_one_p3(bp);
12660
12661         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12662                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
12663
12664         rc = bnxt_init_int_mode(bp);
12665         if (rc)
12666                 goto init_err_pci_clean;
12667
12668         /* No TC has been set yet and rings may have been trimmed due to
12669          * limited MSIX, so we re-initialize the TX rings per TC.
12670          */
12671         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12672
12673         if (BNXT_PF(bp)) {
12674                 if (!bnxt_pf_wq) {
12675                         bnxt_pf_wq =
12676                                 create_singlethread_workqueue("bnxt_pf_wq");
12677                         if (!bnxt_pf_wq) {
12678                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
                                      rc = -ENOMEM;
12679                                 goto init_err_pci_clean;
12680                         }
12681                 }
12682                 rc = bnxt_init_tc(bp);
12683                 if (rc)
12684                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
12685                                    rc);
12686         }
12687
12688         bnxt_dl_register(bp);
12689
12690         rc = register_netdev(dev);
12691         if (rc)
12692                 goto init_err_cleanup;
12693
12694         if (BNXT_PF(bp))
12695                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
12696         bnxt_dl_fw_reporters_create(bp);
12697
12698         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
12699                     board_info[ent->driver_data].name,
12700                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
12701         pcie_print_link_status(pdev);
12702
12703         pci_save_state(pdev);
12704         return 0;
12705
12706 init_err_cleanup:
12707         bnxt_dl_unregister(bp);
12708         bnxt_shutdown_tc(bp);
12709         bnxt_clear_int_mode(bp);
12710
12711 init_err_pci_clean:
12712         bnxt_hwrm_func_drv_unrgtr(bp);
12713         bnxt_free_hwrm_short_cmd_req(bp);
12714         bnxt_free_hwrm_resources(bp);
12715         kfree(bp->fw_health);
12716         bp->fw_health = NULL;
12717         bnxt_cleanup_pci(bp);
12718         bnxt_free_ctx_mem(bp);
12719         kfree(bp->ctx);
12720         bp->ctx = NULL;
12721         kfree(bp->rss_indir_tbl);
12722         bp->rss_indir_tbl = NULL;
12723
12724 init_err_free:
12725         free_netdev(dev);
12726         return rc;
12727 }
12728
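/* PCI shutdown handler: close the NIC and, on system power-off, arm
 * wake-on-LAN before putting the device into D3hot.
 */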
12729 static void bnxt_shutdown(struct pci_dev *pdev)
12730 {
12731         struct net_device *dev = pci_get_drvdata(pdev);
12732         struct bnxt *bp;
12733
12734         if (!dev)
12735                 return;
12736
12737         rtnl_lock();
12738         bp = netdev_priv(dev);
12739         if (!bp)
12740                 goto shutdown_exit;
12741
12742         if (netif_running(dev))
12743                 dev_close(dev);
12744
12745         bnxt_ulp_shutdown(bp);
12746         bnxt_clear_int_mode(bp);
12747         pci_disable_device(pdev);
12748
12749         if (system_state == SYSTEM_POWER_OFF) {
12750                 pci_wake_from_d3(pdev, bp->wol);
12751                 pci_set_power_state(pdev, PCI_D3hot);
12752         }
12753
12754 shutdown_exit:
12755         rtnl_unlock();
12756 }
12757
12758 #ifdef CONFIG_PM_SLEEP
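/* System suspend: detach and close the netdev, unregister the driver
 * from firmware and release firmware context memory.
 */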
12759 static int bnxt_suspend(struct device *device)
12760 {
12761         struct net_device *dev = dev_get_drvdata(device);
12762         struct bnxt *bp = netdev_priv(dev);
12763         int rc = 0;
12764
12765         rtnl_lock();
12766         bnxt_ulp_stop(bp);
12767         if (netif_running(dev)) {
12768                 netif_device_detach(dev);
12769                 rc = bnxt_close(dev);
12770         }
12771         bnxt_hwrm_func_drv_unrgtr(bp);
12772         pci_disable_device(bp->pdev);
12773         bnxt_free_ctx_mem(bp);
12774         kfree(bp->ctx);
12775         bp->ctx = NULL;
12776         rtnl_unlock();
12777         return rc;
12778 }
12779
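/* System resume: re-enable the PCI device, reset and re-register with
 * firmware, then reopen the NIC if it was running at suspend time.
 */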
12780 static int bnxt_resume(struct device *device)
12781 {
12782         struct net_device *dev = dev_get_drvdata(device);
12783         struct bnxt *bp = netdev_priv(dev);
12784         int rc = 0;
12785
12786         rtnl_lock();
12787         rc = pci_enable_device(bp->pdev);
12788         if (rc) {
12789                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
12790                            rc);
12791                 goto resume_exit;
12792         }
12793         pci_set_master(bp->pdev);
12794         if (bnxt_hwrm_ver_get(bp)) {
12795                 rc = -ENODEV;
12796                 goto resume_exit;
12797         }
12798         rc = bnxt_hwrm_func_reset(bp);
12799         if (rc) {
12800                 rc = -EBUSY;
12801                 goto resume_exit;
12802         }
12803
12804         rc = bnxt_hwrm_func_qcaps(bp);
12805         if (rc)
12806                 goto resume_exit;
12807
12808         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
12809                 rc = -ENODEV;
12810                 goto resume_exit;
12811         }
12812
12813         bnxt_get_wol_settings(bp);
12814         if (netif_running(dev)) {
12815                 rc = bnxt_open(dev);
12816                 if (!rc)
12817                         netif_device_attach(dev);
12818         }
12819
12820 resume_exit:
12821         bnxt_ulp_start(bp, rc);
12822         if (!rc)
12823                 bnxt_reenable_sriov(bp);
12824         rtnl_unlock();
12825         return rc;
12826 }
12827
12828 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
12829 #define BNXT_PM_OPS (&bnxt_pm_ops)
12830
12831 #else
12832
12833 #define BNXT_PM_OPS NULL
12834
12835 #endif /* CONFIG_PM_SLEEP */
12836
12837 /**
12838  * bnxt_io_error_detected - called when PCI error is detected
12839  * @pdev: Pointer to PCI device
12840  * @state: The current pci connection state
12841  *
12842  * This function is called after a PCI bus error affecting
12843  * this device has been detected.
12844  */
12845 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
12846                                                pci_channel_state_t state)
12847 {
12848         struct net_device *netdev = pci_get_drvdata(pdev);
12849         struct bnxt *bp = netdev_priv(netdev);
12850
12851         netdev_info(netdev, "PCI I/O error detected\n");
12852
12853         rtnl_lock();
12854         netif_device_detach(netdev);
12855
12856         bnxt_ulp_stop(bp);
12857
12858         if (state == pci_channel_io_perm_failure) {
12859                 rtnl_unlock();
12860                 return PCI_ERS_RESULT_DISCONNECT;
12861         }
12862
12863         if (netif_running(netdev))
12864                 bnxt_close(netdev);
12865
12866         pci_disable_device(pdev);
12867         bnxt_free_ctx_mem(bp);
12868         kfree(bp->ctx);
12869         bp->ctx = NULL;
12870         rtnl_unlock();
12871
12872         /* Request a slot reset. */
12873         return PCI_ERS_RESULT_NEED_RESET;
12874 }
12875
12876 /**
12877  * bnxt_io_slot_reset - called after the PCI bus has been reset.
12878  * @pdev: Pointer to PCI device
12879  *
12880  * Restart the card from scratch, as if from a cold-boot.
12881  * At this point, the card has experienced a hard reset,
12882  * followed by fixups by BIOS, and has its config space
12883  * set up identically to what it was at cold boot.
12884  */
12885 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
12886 {
12887         struct net_device *netdev = pci_get_drvdata(pdev);
12888         struct bnxt *bp = netdev_priv(netdev);
12889         int err = 0;
12890         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
12891
12892         netdev_info(bp->dev, "PCI Slot Reset\n");
12893
12894         rtnl_lock();
12895
12896         if (pci_enable_device(pdev)) {
12897                 dev_err(&pdev->dev,
12898                         "Cannot re-enable PCI device after reset.\n");
12899         } else {
12900                 pci_set_master(pdev);
12901                 pci_restore_state(pdev);
12902                 pci_save_state(pdev);
12903
12904                 err = bnxt_hwrm_func_reset(bp);
12905                 if (!err) {
12906                         err = bnxt_hwrm_func_qcaps(bp);
12907                         if (!err && netif_running(netdev))
12908                                 err = bnxt_open(netdev);
12909                 }
12910                 bnxt_ulp_start(bp, err);
12911                 if (!err) {
12912                         bnxt_reenable_sriov(bp);
12913                         result = PCI_ERS_RESULT_RECOVERED;
12914                 }
12915         }
12916
12917         if (result != PCI_ERS_RESULT_RECOVERED) {
12918                 if (netif_running(netdev))
12919                         dev_close(netdev);
12920                 pci_disable_device(pdev);
12921         }
12922
12923         rtnl_unlock();
12924
12925         return result;
12926 }
12927
12928 /**
12929  * bnxt_io_resume - called when traffic can start flowing again.
12930  * @pdev: Pointer to PCI device
12931  *
12932  * This callback is called when the error recovery driver tells
12933  * us that it's OK to resume normal operation.
12934  */
12935 static void bnxt_io_resume(struct pci_dev *pdev)
12936 {
12937         struct net_device *netdev = pci_get_drvdata(pdev);
12938
12939         rtnl_lock();
12940
12941         netif_device_attach(netdev);
12942
12943         rtnl_unlock();
12944 }
12945
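/* AER recovery sequence: .error_detected runs first, .slot_reset runs
 * after the link has been reset, and .resume runs once normal traffic
 * may flow again.
 */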
12946 static const struct pci_error_handlers bnxt_err_handler = {
12947         .error_detected = bnxt_io_error_detected,
12948         .slot_reset     = bnxt_io_slot_reset,
12949         .resume         = bnxt_io_resume
12950 };
12951
12952 static struct pci_driver bnxt_pci_driver = {
12953         .name           = DRV_MODULE_NAME,
12954         .id_table       = bnxt_pci_tbl,
12955         .probe          = bnxt_init_one,
12956         .remove         = bnxt_remove_one,
12957         .shutdown       = bnxt_shutdown,
12958         .driver.pm      = BNXT_PM_OPS,
12959         .err_handler    = &bnxt_err_handler,
12960 #if defined(CONFIG_BNXT_SRIOV)
12961         .sriov_configure = bnxt_sriov_configure,
12962 #endif
12963 };
12964
12965 static int __init bnxt_init(void)
12966 {
12967         bnxt_debug_init();
12968         return pci_register_driver(&bnxt_pci_driver);
12969 }
12970
12971 static void __exit bnxt_exit(void)
12972 {
12973         pci_unregister_driver(&bnxt_pci_driver);
12974         if (bnxt_pf_wq)
12975                 destroy_workqueue(bnxt_pf_wq);
12976         bnxt_debug_exit();
12977 }
12978
12979 module_init(bnxt_init);
12980 module_exit(bnxt_exit);