drivers/net/ethernet/broadcom/bnxt/bnxt.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
#include <linux/align.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

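/* TX packets up to this many bytes can be copied inline into the
 * doorbell BAR ("push" mode) instead of being fetched by DMA; see the
 * push path in bnxt_start_xmit().
 */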
#define BNXT_TX_PUSH_THRESH 164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

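/* Doorbell helpers: legacy chips take 32-bit writel() doorbells, while
 * P5 chips take 64-bit doorbells built from db_key64.  The bnxt_db_*()
 * wrappers below select the format based on BNXT_FLAG_CHIP_P5.
 */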
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),	\
		    (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
		    (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    RING_CMP(idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

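/* Length hint flags for TX BDs, indexed by the packet length in
 * 512-byte units; bnxt_start_xmit() shifts the length right by 9
 * before indexing this table.
 */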
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

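/* For switchdev (VF representor) traffic, the redirect target is
 * carried in the skb's HW port mux metadata dst; return 0 when no such
 * metadata is attached.
 */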
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
					  struct bnxt_tx_ring_info *txr,
					  struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);

	/* netif_tx_stop_queue() must be done before checking
	 * tx index in bnxt_tx_avail() below, because in
	 * bnxt_tx_int(), we update tx index before checking for
	 * netif_tx_queue_stopped().
	 */
	smp_mb();
	if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
		netif_tx_wake_queue(txq);
		return false;
	}

	return true;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
			return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

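	/* If the ring is completely idle and the packet is short with no
	 * special lflags, "push" the BDs and packet data straight into
	 * the doorbell BAR to avoid the DMA read latency.
	 */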
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_tcp_all_headers(skb);
		else
			hdr_len = skb_tcp_all_headers(skb);

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

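	/* Convert the length to 512-byte units to index bnxt_lhint_arr. */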
	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_txr_db_kick(bp, txr, prod);
	else
		txr->kick_pending = 1;

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_txr_db_kick(bp, txr, prod);

		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

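/* TX completion handler, called from NAPI poll with nr_pkts packets to
 * clean.  Unmaps the buffers, completes BQL accounting, and wakes the
 * queue if it was stopped and enough descriptors are now free.
 */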
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		tx_bytes += skb->len;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(&pdev->dev,
				       dma_unmap_addr(tx_buf, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[j]),
				       DMA_TO_DEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				/* PTP worker takes ownership of the skb */
				if (!bnxt_get_tx_ts_p5(bp, skb))
					skb = NULL;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
		netif_tx_wake_queue(txq);
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	return page;
}

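/* gfp selects the allocation context: GFP_ATOMIC means we are in NAPI
 * and can use the per-CPU napi frag cache; anything else falls back to
 * netdev_alloc_frag().
 */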
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	if (gfp == GFP_ATOMIC)
		data = napi_alloc_frag(bp->rx_buf_size);
	else
		data = netdev_alloc_frag(bp->rx_buf_size);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		skb_free_frag(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

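/* Find the next free slot in the agg bitmap, wrapping around to the
 * beginning if the search reaches the end.
 */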
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

	} else {
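		/* On systems where the host page is larger than
		 * BNXT_RX_PAGE_SIZE, carve the page into
		 * BNXT_RX_PAGE_SIZE chunks, taking an extra page
		 * reference for each chunk handed out.
		 */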
		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
			page = rxr->rx_page;
			if (!page) {
				page = alloc_page(gfp);
				if (!page)
					return -ENOMEM;
				rxr->rx_page = page;
				rxr->rx_page_offset = 0;
			}
			offset = rxr->rx_page_offset;
			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
			if (rxr->rx_page_offset == PAGE_SIZE)
				rxr->rx_page = NULL;
			else
				get_page(page);
		} else {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
		}

		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
					     DMA_ATTR_WEAK_ORDERING);
		if (dma_mapping_error(&pdev->dev, mapping)) {
			__free_page(page);
			return -EIO;
		}
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

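/* Aggregation completions follow the RX completion on the same
 * completion ring; ADV_RAW_CMP() advances the raw consumer index by
 * "curr" entries to locate the requested agg entry.
 */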
static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

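/* The bnxt_rx_*_skb() constructors below consume the RX buffer that was
 * just completed.  Each one must first allocate a replacement buffer
 * for the ring; on failure the old buffer is recycled via
 * bnxt_reuse_rx_data() and the packet is dropped.
 */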
static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
{
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
					    bp->rx_dma_offset);
	if (!skb) {
		__free_page(page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_dma_offset);
	__skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, bp->rx_buf_size);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

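/* Gather the aggregation buffers that complete an RX packet into skb or
 * xdp_buff frags.  Returns the total frag length, or 0 on allocation
 * failure (in which case the agg buffers are recycled back to the
 * ring).
 */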
static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			       struct bnxt_cp_ring_info *cpr,
			       struct skb_shared_info *shinfo,
			       u16 idx, u32 agg_bufs, bool tpa,
			       struct xdp_buff *xdp)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i, total_frag_len = 0;
	bool p5_tpa = false;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		skb_frag_t *frag = &shinfo->frags[i];
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_frag_off_set(frag, cons_rx_buf->offset);
		skb_frag_size_set(frag, frag_len);
		__skb_frag_set_page(frag, cons_rx_buf->page);
		shinfo->nr_frags = i + 1;
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (xdp && page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			unsigned int nr_frags;

			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
					     struct bnxt_cp_ring_info *cpr,
					     struct sk_buff *skb, u16 idx,
					     u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 total_frag_len = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
					     agg_bufs, tpa, NULL);
	if (!total_frag_len) {
		dev_kfree_skb(skb);
		return NULL;
	}

	skb->data_len += total_frag_len;
	skb->len += total_frag_len;
	skb->truesize += PAGE_SIZE * agg_bufs;
	return skb;
}

static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 struct xdp_buff *xdp, u16 idx,
				 u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	u32 total_frag_len = 0;

	if (!xdp_buff_has_frags(xdp))
		shinfo->nr_frags = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
					     idx, agg_bufs, tpa, xdp);
	if (total_frag_len) {
		xdp_buff_set_frags_flag(xdp);
		shinfo->nr_frags = agg_bufs;
		shinfo->xdp_frags_size = total_frag_len;
	}
	return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

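/* Copy a small received packet (bounded by rx_copy_thresh) into a fresh
 * skb so the original ring buffer can be reused in place.
 */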
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

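/* Schedule a ring (or chip) reset from NAPI context.  rx_next_cons is
 * set to an invalid value so that subsequent completions on this ring
 * are ignored until the reset completes.
 */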
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

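/* On P5 chips the firmware-assigned agg IDs do not map 1:1 to the
 * driver's TPA array, so hardware IDs are hashed to free driver indices
 * via a bitmap, with agg_id_tbl remembering the mapping for lookup at
 * TPA end time.
 */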
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

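/* Handle a TPA_START completion: the hardware has begun aggregating a
 * TCP flow (hardware GRO/LRO).  Park the current RX buffer in tpa_info
 * until the matching TPA_END arrives, and give the ring a replacement
 * buffer.
 */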
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1404         cons_rx_buf->data = NULL;
1405 }
1406
1407 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1408 {
1409         if (agg_bufs)
1410                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1411 }
1412
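/* By the time the GRO helpers below run, eth_type_trans() has been called,
 * so skb->data points at the outer network header.  bnxt_gro_tunnel() peeks
 * at that header to see whether the tunnel is UDP based and picks the GSO
 * tunnel type accordingly (with or without outer UDP checksum).
 */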
1413 #ifdef CONFIG_INET
1414 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1415 {
1416         struct udphdr *uh = NULL;
1417
1418         if (ip_proto == htons(ETH_P_IP)) {
1419                 struct iphdr *iph = (struct iphdr *)skb->data;
1420
1421                 if (iph->protocol == IPPROTO_UDP)
1422                         uh = (struct udphdr *)(iph + 1);
1423         } else {
1424                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1425
1426                 if (iph->nexthdr == IPPROTO_UDP)
1427                         uh = (struct udphdr *)(iph + 1);
1428         }
1429         if (uh) {
1430                 if (uh->check)
1431                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1432                 else
1433                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1434         }
1435 }
1436 #endif
1437
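/* Per-chip GRO completion helpers (bp->gro_func).  Each variant recovers
 * the inner network/transport header offsets from the TPA metadata; the
 * 5730x/5731x variants also reset the TCP pseudo-header checksum before
 * tcp_gro_complete() runs on the aggregated skb.
 */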
1438 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1439                                            int payload_off, int tcp_ts,
1440                                            struct sk_buff *skb)
1441 {
1442 #ifdef CONFIG_INET
1443         struct tcphdr *th;
1444         int len, nw_off;
1445         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1446         u32 hdr_info = tpa_info->hdr_info;
1447         bool loopback = false;
1448
1449         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1450         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1451         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1452
1453         /* If the packet is an internal loopback packet, the offsets will
1454          * have an extra 4 bytes.
1455          */
1456         if (inner_mac_off == 4) {
1457                 loopback = true;
1458         } else if (inner_mac_off > 4) {
1459                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1460                                             ETH_HLEN - 2));
1461
1462                 /* We only support inner IPv4/IPv6.  If we don't see the
1463                  * correct protocol ID, it must be a loopback packet where
1464                  * the offsets are off by 4.
1465                  */
1466                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1467                         loopback = true;
1468         }
1469         if (loopback) {
1470                 /* internal loopback packet, subtract 4 from all offsets */
1471                 inner_ip_off -= 4;
1472                 inner_mac_off -= 4;
1473                 outer_ip_off -= 4;
1474         }
1475
1476         nw_off = inner_ip_off - ETH_HLEN;
1477         skb_set_network_header(skb, nw_off);
1478         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1479                 struct ipv6hdr *iph = ipv6_hdr(skb);
1480
1481                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1482                 len = skb->len - skb_transport_offset(skb);
1483                 th = tcp_hdr(skb);
1484                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1485         } else {
1486                 struct iphdr *iph = ip_hdr(skb);
1487
1488                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1489                 len = skb->len - skb_transport_offset(skb);
1490                 th = tcp_hdr(skb);
1491                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1492         }
1493
1494         if (inner_mac_off) { /* tunnel */
1495                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1496                                             ETH_HLEN - 2));
1497
1498                 bnxt_gro_tunnel(skb, proto);
1499         }
1500 #endif
1501         return skb;
1502 }
1503
1504 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1505                                            int payload_off, int tcp_ts,
1506                                            struct sk_buff *skb)
1507 {
1508 #ifdef CONFIG_INET
1509         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1510         u32 hdr_info = tpa_info->hdr_info;
1511         int iphdr_len, nw_off;
1512
1513         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1514         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1515         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1516
1517         nw_off = inner_ip_off - ETH_HLEN;
1518         skb_set_network_header(skb, nw_off);
1519         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1520                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1521         skb_set_transport_header(skb, nw_off + iphdr_len);
1522
1523         if (inner_mac_off) { /* tunnel */
1524                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1525                                             ETH_HLEN - 2));
1526
1527                 bnxt_gro_tunnel(skb, proto);
1528         }
1529 #endif
1530         return skb;
1531 }
1532
1533 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1534 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1535
1536 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1537                                            int payload_off, int tcp_ts,
1538                                            struct sk_buff *skb)
1539 {
1540 #ifdef CONFIG_INET
1541         struct tcphdr *th;
1542         int len, nw_off, tcp_opt_len = 0;
1543
1544         if (tcp_ts)
1545                 tcp_opt_len = 12;
1546
1547         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1548                 struct iphdr *iph;
1549
1550                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1551                          ETH_HLEN;
1552                 skb_set_network_header(skb, nw_off);
1553                 iph = ip_hdr(skb);
1554                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1555                 len = skb->len - skb_transport_offset(skb);
1556                 th = tcp_hdr(skb);
1557                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1558         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1559                 struct ipv6hdr *iph;
1560
1561                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1562                          ETH_HLEN;
1563                 skb_set_network_header(skb, nw_off);
1564                 iph = ipv6_hdr(skb);
1565                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1566                 len = skb->len - skb_transport_offset(skb);
1567                 th = tcp_hdr(skb);
1568                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1569         } else {
1570                 dev_kfree_skb_any(skb);
1571                 return NULL;
1572         }
1573
1574         if (nw_off) /* tunnel */
1575                 bnxt_gro_tunnel(skb, skb->protocol);
1576 #endif
1577         return skb;
1578 }
1579
1580 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1581                                            struct bnxt_tpa_info *tpa_info,
1582                                            struct rx_tpa_end_cmp *tpa_end,
1583                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1584                                            struct sk_buff *skb)
1585 {
1586 #ifdef CONFIG_INET
1587         int payload_off;
1588         u16 segs;
1589
1590         segs = TPA_END_TPA_SEGS(tpa_end);
1591         if (segs == 1)
1592                 return skb;
1593
1594         NAPI_GRO_CB(skb)->count = segs;
1595         skb_shinfo(skb)->gso_size =
1596                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1597         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1598         if (bp->flags & BNXT_FLAG_CHIP_P5)
1599                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1600         else
1601                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1602         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1603         if (likely(skb))
1604                 tcp_gro_complete(skb);
1605 #endif
1606         return skb;
1607 }
1608
1609 /* Given the cfa_code of a received packet, determine which
1610  * netdev (vf-rep or PF) the packet is destined to.
1611  */
1612 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1613 {
1614         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1615
1616         /* if vf-rep dev is NULL, the packet must belong to the PF */
1617         return dev ? dev : bp->dev;
1618 }
1619
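/* Handle a TPA end completion and return the completed skb, NULL if the
 * packet was dropped, or ERR_PTR(-EBUSY) if the completion ring does not
 * yet hold all the aggregation buffers.  Small aggregations (up to
 * rx_copy_thresh bytes) are copied into a fresh skb; larger ones take over
 * the TPA buffer, and a replacement buffer is allocated for the next
 * aggregation.
 */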
1620 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1621                                            struct bnxt_cp_ring_info *cpr,
1622                                            u32 *raw_cons,
1623                                            struct rx_tpa_end_cmp *tpa_end,
1624                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1625                                            u8 *event)
1626 {
1627         struct bnxt_napi *bnapi = cpr->bnapi;
1628         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1629         u8 *data_ptr, agg_bufs;
1630         unsigned int len;
1631         struct bnxt_tpa_info *tpa_info;
1632         dma_addr_t mapping;
1633         struct sk_buff *skb;
1634         u16 idx = 0, agg_id;
1635         void *data;
1636         bool gro;
1637
1638         if (unlikely(bnapi->in_reset)) {
1639                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1640
1641                 if (rc < 0)
1642                         return ERR_PTR(-EBUSY);
1643                 return NULL;
1644         }
1645
1646         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1647                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1648                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1649                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1650                 tpa_info = &rxr->rx_tpa[agg_id];
1651                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1652                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1653                                     agg_bufs, tpa_info->agg_count);
1654                         agg_bufs = tpa_info->agg_count;
1655                 }
1656                 tpa_info->agg_count = 0;
1657                 *event |= BNXT_AGG_EVENT;
1658                 bnxt_free_agg_idx(rxr, agg_id);
1659                 idx = agg_id;
1660                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1661         } else {
1662                 agg_id = TPA_END_AGG_ID(tpa_end);
1663                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1664                 tpa_info = &rxr->rx_tpa[agg_id];
1665                 idx = RING_CMP(*raw_cons);
1666                 if (agg_bufs) {
1667                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1668                                 return ERR_PTR(-EBUSY);
1669
1670                         *event |= BNXT_AGG_EVENT;
1671                         idx = NEXT_CMP(idx);
1672                 }
1673                 gro = !!TPA_END_GRO(tpa_end);
1674         }
1675         data = tpa_info->data;
1676         data_ptr = tpa_info->data_ptr;
1677         prefetch(data_ptr);
1678         len = tpa_info->len;
1679         mapping = tpa_info->mapping;
1680
1681         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1682                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1683                 if (agg_bufs > MAX_SKB_FRAGS)
1684                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1685                                     agg_bufs, (int)MAX_SKB_FRAGS);
1686                 return NULL;
1687         }
1688
1689         if (len <= bp->rx_copy_thresh) {
1690                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1691                 if (!skb) {
1692                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1693                         cpr->sw_stats.rx.rx_oom_discards += 1;
1694                         return NULL;
1695                 }
1696         } else {
1697                 u8 *new_data;
1698                 dma_addr_t new_mapping;
1699
1700                 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1701                 if (!new_data) {
1702                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1703                         cpr->sw_stats.rx.rx_oom_discards += 1;
1704                         return NULL;
1705                 }
1706
1707                 tpa_info->data = new_data;
1708                 tpa_info->data_ptr = new_data + bp->rx_offset;
1709                 tpa_info->mapping = new_mapping;
1710
1711                 skb = build_skb(data, bp->rx_buf_size);
1712                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1713                                        bp->rx_buf_use_size, bp->rx_dir,
1714                                        DMA_ATTR_WEAK_ORDERING);
1715
1716                 if (!skb) {
1717                         skb_free_frag(data);
1718                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1719                         cpr->sw_stats.rx.rx_oom_discards += 1;
1720                         return NULL;
1721                 }
1722                 skb_reserve(skb, bp->rx_offset);
1723                 skb_put(skb, len);
1724         }
1725
1726         if (agg_bufs) {
1727                 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1728                 if (!skb) {
1729                         /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
1730                         cpr->sw_stats.rx.rx_oom_discards += 1;
1731                         return NULL;
1732                 }
1733         }
1734
1735         skb->protocol =
1736                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1737
1738         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1739                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1740
1741         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1742             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1743                 __be16 vlan_proto = htons(tpa_info->metadata >>
1744                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1745                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1746
1747                 if (eth_type_vlan(vlan_proto)) {
1748                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1749                 } else {
1750                         dev_kfree_skb(skb);
1751                         return NULL;
1752                 }
1753         }
1754
1755         skb_checksum_none_assert(skb);
1756         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1757                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1758                 skb->csum_level =
1759                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1760         }
1761
1762         if (gro)
1763                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1764
1765         return skb;
1766 }
1767
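/* Stash a TPA aggregation completion (P5 chips) in the matching tpa_info
 * until the TPA end completion is processed.
 */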
1768 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1769                          struct rx_agg_cmp *rx_agg)
1770 {
1771         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1772         struct bnxt_tpa_info *tpa_info;
1773
1774         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1775         tpa_info = &rxr->rx_tpa[agg_id];
1776         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1777         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1778 }
1779
1780 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1781                              struct sk_buff *skb)
1782 {
1783         if (skb->dev != bp->dev) {
1784                 /* this packet belongs to a vf-rep */
1785                 bnxt_vf_rep_rx(bp, skb);
1786                 return;
1787         }
1788         skb_record_rx_queue(skb, bnapi->index);
1789         napi_gro_receive(&bnapi->napi, skb);
1790 }
1791
1792 /* returns the following:
1793  * 1       - 1 packet successfully received
1794  * 0       - successful TPA_START, packet not completed yet
1795  * -EBUSY  - completion ring does not have all the agg buffers yet
1796  * -ENOMEM - packet aborted due to out of memory
1797  * -EIO    - packet aborted due to hw error indicated in BD
1798  */
1799 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1800                        u32 *raw_cons, u8 *event)
1801 {
1802         struct bnxt_napi *bnapi = cpr->bnapi;
1803         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1804         struct net_device *dev = bp->dev;
1805         struct rx_cmp *rxcmp;
1806         struct rx_cmp_ext *rxcmp1;
1807         u32 tmp_raw_cons = *raw_cons;
1808         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1809         struct bnxt_sw_rx_bd *rx_buf;
1810         unsigned int len;
1811         u8 *data_ptr, agg_bufs, cmp_type;
1812         bool xdp_active = false;
1813         dma_addr_t dma_addr;
1814         struct sk_buff *skb;
1815         struct xdp_buff xdp;
1816         u32 flags, misc;
1817         void *data;
1818         int rc = 0;
1819
1820         rxcmp = (struct rx_cmp *)
1821                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1822
1823         cmp_type = RX_CMP_TYPE(rxcmp);
1824
1825         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1826                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1827                 goto next_rx_no_prod_no_len;
1828         }
1829
1830         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1831         cp_cons = RING_CMP(tmp_raw_cons);
1832         rxcmp1 = (struct rx_cmp_ext *)
1833                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1834
1835         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1836                 return -EBUSY;
1837
1838         /* The validity of the entry must be checked before reading
1839          * any further.
1840          */
1841         dma_rmb();
1842         prod = rxr->rx_prod;
1843
1844         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1845                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1846                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1847
1848                 *event |= BNXT_RX_EVENT;
1849                 goto next_rx_no_prod_no_len;
1850
1851         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1852                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1853                                    (struct rx_tpa_end_cmp *)rxcmp,
1854                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1855
1856                 if (IS_ERR(skb))
1857                         return -EBUSY;
1858
1859                 rc = -ENOMEM;
1860                 if (likely(skb)) {
1861                         bnxt_deliver_skb(bp, bnapi, skb);
1862                         rc = 1;
1863                 }
1864                 *event |= BNXT_RX_EVENT;
1865                 goto next_rx_no_prod_no_len;
1866         }
1867
1868         cons = rxcmp->rx_cmp_opaque;
1869         if (unlikely(cons != rxr->rx_next_cons)) {
1870                 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1871
1872                 /* 0xffff is the forced error set by bnxt_sched_reset(), don't print it */
1873                 if (rxr->rx_next_cons != 0xffff)
1874                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1875                                     cons, rxr->rx_next_cons);
1876                 bnxt_sched_reset(bp, rxr);
1877                 if (rc1)
1878                         return rc1;
1879                 goto next_rx_no_prod_no_len;
1880         }
1881         rx_buf = &rxr->rx_buf_ring[cons];
1882         data = rx_buf->data;
1883         data_ptr = rx_buf->data_ptr;
1884         prefetch(data_ptr);
1885
1886         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1887         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1888
1889         if (agg_bufs) {
1890                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1891                         return -EBUSY;
1892
1893                 cp_cons = NEXT_CMP(cp_cons);
1894                 *event |= BNXT_AGG_EVENT;
1895         }
1896         *event |= BNXT_RX_EVENT;
1897
1898         rx_buf->data = NULL;
1899         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1900                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1901
1902                 bnxt_reuse_rx_data(rxr, cons, data);
1903                 if (agg_bufs)
1904                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1905                                                false);
1906
1907                 rc = -EIO;
1908                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1909                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1910                         if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1911                             !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1912                                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1913                                                  rx_err);
1914                                 bnxt_sched_reset(bp, rxr);
1915                         }
1916                 }
1917                 goto next_rx_no_len;
1918         }
1919
1920         flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1921         len = flags >> RX_CMP_LEN_SHIFT;
1922         dma_addr = rx_buf->mapping;
1923
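        /* If an XDP program is attached, let it inspect the packet before an
         * skb is built; bnxt_rx_xdp() returns true when the program consumed
         * the packet (drop, TX or redirect).
         */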
1924         if (bnxt_xdp_attached(bp, rxr)) {
1925                 bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
1926                 if (agg_bufs) {
1927                         u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
1928                                                              cp_cons, agg_bufs,
1929                                                              false);
1930                         if (!frag_len) {
1931                                 cpr->sw_stats.rx.rx_oom_discards += 1;
1932                                 rc = -ENOMEM;
1933                                 goto next_rx;
1934                         }
1935                 }
1936                 xdp_active = true;
1937         }
1938
1939         if (xdp_active) {
1940                 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
1941                         rc = 1;
1942                         goto next_rx;
1943                 }
1944         }
1945
1946         if (len <= bp->rx_copy_thresh) {
1947                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1948                 bnxt_reuse_rx_data(rxr, cons, data);
1949                 if (!skb) {
1950                         if (agg_bufs) {
1951                                 if (!xdp_active)
1952                                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1953                                                                agg_bufs, false);
1954                                 else
1955                                         bnxt_xdp_buff_frags_free(rxr, &xdp);
1956                         }
1957                         cpr->sw_stats.rx.rx_oom_discards += 1;
1958                         rc = -ENOMEM;
1959                         goto next_rx;
1960                 }
1961         } else {
1962                 u32 payload;
1963
1964                 if (rx_buf->data_ptr == data_ptr)
1965                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1966                 else
1967                         payload = 0;
1968                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1969                                       payload | len);
1970                 if (!skb) {
1971                         cpr->sw_stats.rx.rx_oom_discards += 1;
1972                         rc = -ENOMEM;
1973                         goto next_rx;
1974                 }
1975         }
1976
1977         if (agg_bufs) {
1978                 if (!xdp_active) {
1979                         skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
1980                         if (!skb) {
1981                                 cpr->sw_stats.rx.rx_oom_discards += 1;
1982                                 rc = -ENOMEM;
1983                                 goto next_rx;
1984                         }
1985                 } else {
1986                         skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
1987                         if (!skb) {
1988                                 /* we should be able to free the old skb here */
1989                                 bnxt_xdp_buff_frags_free(rxr, &xdp);
1990                                 cpr->sw_stats.rx.rx_oom_discards += 1;
1991                                 rc = -ENOMEM;
1992                                 goto next_rx;
1993                         }
1994                 }
1995         }
1996
1997         if (RX_CMP_HASH_VALID(rxcmp)) {
1998                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1999                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
2000
2001                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
2002                 if (hash_type != 1 && hash_type != 3)
2003                         type = PKT_HASH_TYPE_L3;
2004                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2005         }
2006
2007         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
2008         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
2009
2010         if ((rxcmp1->rx_cmp_flags2 &
2011              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
2012             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
2013                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2014                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2015                 __be16 vlan_proto = htons(meta_data >>
2016                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
2017
2018                 if (eth_type_vlan(vlan_proto)) {
2019                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2020                 } else {
2021                         dev_kfree_skb(skb);
2022                         goto next_rx;
2023                 }
2024         }
2025
2026         skb_checksum_none_assert(skb);
2027         if (RX_CMP_L4_CS_OK(rxcmp1)) {
2028                 if (dev->features & NETIF_F_RXCSUM) {
2029                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2030                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2031                 }
2032         } else {
2033                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2034                         if (dev->features & NETIF_F_RXCSUM)
2035                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
2036                 }
2037         }
2038
2039         if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
2040                      RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
2041                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2042                         u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2043                         u64 ns, ts;
2044
2045                         if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2046                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2047
2048                                 spin_lock_bh(&ptp->ptp_lock);
2049                                 ns = timecounter_cyc2time(&ptp->tc, ts);
2050                                 spin_unlock_bh(&ptp->ptp_lock);
2051                                 memset(skb_hwtstamps(skb), 0,
2052                                        sizeof(*skb_hwtstamps(skb)));
2053                                 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2054                         }
2055                 }
2056         }
2057         bnxt_deliver_skb(bp, bnapi, skb);
2058         rc = 1;
2059
2060 next_rx:
2061         cpr->rx_packets += 1;
2062         cpr->rx_bytes += len;
2063
2064 next_rx_no_len:
2065         rxr->rx_prod = NEXT_RX(prod);
2066         rxr->rx_next_cons = NEXT_RX(cons);
2067
2068 next_rx_no_prod_no_len:
2069         *raw_cons = tmp_raw_cons;
2070
2071         return rc;
2072 }
2073
2074 /* In netpoll mode, if we are using a combined completion ring, we discard
2075  * the rx packets by forcing an error so that the buffers get recycled.
2076  */
2077 static int bnxt_force_rx_discard(struct bnxt *bp,
2078                                  struct bnxt_cp_ring_info *cpr,
2079                                  u32 *raw_cons, u8 *event)
2080 {
2081         u32 tmp_raw_cons = *raw_cons;
2082         struct rx_cmp_ext *rxcmp1;
2083         struct rx_cmp *rxcmp;
2084         u16 cp_cons;
2085         u8 cmp_type;
2086         int rc;
2087
2088         cp_cons = RING_CMP(tmp_raw_cons);
2089         rxcmp = (struct rx_cmp *)
2090                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2091
2092         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2093         cp_cons = RING_CMP(tmp_raw_cons);
2094         rxcmp1 = (struct rx_cmp_ext *)
2095                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2096
2097         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2098                 return -EBUSY;
2099
2100         /* The validity of the entry must be checked before reading
2101          * any further.
2102          */
2103         dma_rmb();
2104         cmp_type = RX_CMP_TYPE(rxcmp);
2105         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2106                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2107                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2108         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2109                 struct rx_tpa_end_cmp_ext *tpa_end1;
2110
2111                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2112                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2113                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2114         }
2115         rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2116         if (rc && rc != -EBUSY)
2117                 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2118         return rc;
2119 }
2120
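/* Read a firmware health register.  fw_health->regs[] entries encode the
 * register space in the low bits and the offset in the remaining bits; GRC
 * registers were remapped into a BAR0 window at setup time, so the
 * remapped offset from mapped_regs[] is used for the readl() below.
 */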
2121 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2122 {
2123         struct bnxt_fw_health *fw_health = bp->fw_health;
2124         u32 reg = fw_health->regs[reg_idx];
2125         u32 reg_type, reg_off, val = 0;
2126
2127         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2128         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2129         switch (reg_type) {
2130         case BNXT_FW_HEALTH_REG_TYPE_CFG:
2131                 pci_read_config_dword(bp->pdev, reg_off, &val);
2132                 break;
2133         case BNXT_FW_HEALTH_REG_TYPE_GRC:
2134                 reg_off = fw_health->mapped_regs[reg_idx];
2135                 fallthrough;
2136         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2137                 val = readl(bp->bar0 + reg_off);
2138                 break;
2139         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2140                 val = readl(bp->bar1 + reg_off);
2141                 break;
2142         }
2143         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2144                 val &= fw_health->fw_reset_inprog_reg_mask;
2145         return val;
2146 }
2147
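/* Map a firmware aggregation ring id (as reported in a ring monitor event)
 * back to the driver's ring group index.
 */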
2148 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2149 {
2150         int i;
2151
2152         for (i = 0; i < bp->rx_nr_rings; i++) {
2153                 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2154                 struct bnxt_ring_grp_info *grp_info;
2155
2156                 grp_info = &bp->grp_info[grp_idx];
2157                 if (grp_info->agg_fw_ring_id == ring_id)
2158                         return grp_idx;
2159         }
2160         return INVALID_HW_RING_ID;
2161 }
2162
2163 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2164 {
2165         u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2166
2167         switch (err_type) {
2168         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2169                 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2170                            BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2171                 break;
2172         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2173                 netdev_warn(bp->dev, "Pause Storm detected!\n");
2174                 break;
2175         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2176                 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2177                 break;
2178         default:
2179                 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2180                            err_type);
2181                 break;
2182         }
2183 }
2184
2185 #define BNXT_GET_EVENT_PORT(data)       \
2186         ((data) &                       \
2187          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2188
2189 #define BNXT_EVENT_RING_TYPE(data2)     \
2190         ((data2) &                      \
2191          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2192
2193 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
2194         (BNXT_EVENT_RING_TYPE(data2) == \
2195          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2196
2197 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)        \
2198         (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2199          ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2200
2201 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)        \
2202         (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2203          ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2204
2205 #define BNXT_PHC_BITS   48
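/* The PHC time is carried in a 48-bit cycle counter; a PHC_RTC_UPDATE event
 * delivers the bits above bit 47 in data1, which are recombined with the
 * current lower 48 bits when the timecounter is reinitialized below.
 */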
2206
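/* Process one firmware async event completion.  Most cases set an sp_event
 * bit and fall through to bnxt_queue_sp_work() at the bottom; events that
 * need no deferred work jump straight to async_event_process_exit.
 */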
2207 static int bnxt_async_event_process(struct bnxt *bp,
2208                                     struct hwrm_async_event_cmpl *cmpl)
2209 {
2210         u16 event_id = le16_to_cpu(cmpl->event_id);
2211         u32 data1 = le32_to_cpu(cmpl->event_data1);
2212         u32 data2 = le32_to_cpu(cmpl->event_data2);
2213
2214         netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2215                    event_id, data1, data2);
2216
2217         /* TODO CHIMP_FW: Define event id's for link change, error etc */
2218         switch (event_id) {
2219         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2220                 struct bnxt_link_info *link_info = &bp->link_info;
2221
2222                 if (BNXT_VF(bp))
2223                         goto async_event_process_exit;
2224
2225                 /* print unsupported speed warning in forced speed mode only */
2226                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2227                     (data1 & 0x20000)) {
2228                         u16 fw_speed = link_info->force_link_speed;
2229                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2230
2231                         if (speed != SPEED_UNKNOWN)
2232                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2233                                             speed);
2234                 }
2235                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2236         }
2237                 fallthrough;
2238         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2239         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2240                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2241                 fallthrough;
2242         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2243                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2244                 break;
2245         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2246                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2247                 break;
2248         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2249                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2250
2251                 if (BNXT_VF(bp))
2252                         break;
2253
2254                 if (bp->pf.port_id != port_id)
2255                         break;
2256
2257                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2258                 break;
2259         }
2260         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2261                 if (BNXT_PF(bp))
2262                         goto async_event_process_exit;
2263                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2264                 break;
2265         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2266                 char *type_str = "Solicited";
2267
2268                 if (!bp->fw_health)
2269                         goto async_event_process_exit;
2270
2271                 bp->fw_reset_timestamp = jiffies;
2272                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2273                 if (!bp->fw_reset_min_dsecs)
2274                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2275                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2276                 if (!bp->fw_reset_max_dsecs)
2277                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2278                 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2279                         set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2280                 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2281                         type_str = "Fatal";
2282                         bp->fw_health->fatalities++;
2283                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2284                 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2285                            EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2286                         type_str = "Non-fatal";
2287                         bp->fw_health->survivals++;
2288                         set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2289                 }
2290                 netif_warn(bp, hw, bp->dev,
2291                            "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2292                            type_str, data1, data2,
2293                            bp->fw_reset_min_dsecs * 100,
2294                            bp->fw_reset_max_dsecs * 100);
2295                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2296                 break;
2297         }
2298         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2299                 struct bnxt_fw_health *fw_health = bp->fw_health;
2300                 char *status_desc = "healthy";
2301                 u32 status;
2302
2303                 if (!fw_health)
2304                         goto async_event_process_exit;
2305
2306                 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2307                         fw_health->enabled = false;
2308                         netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2309                         break;
2310                 }
2311                 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2312                 fw_health->tmr_multiplier =
2313                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2314                                      bp->current_interval * 10);
2315                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2316                 if (!fw_health->enabled)
2317                         fw_health->last_fw_heartbeat =
2318                                 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2319                 fw_health->last_fw_reset_cnt =
2320                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2321                 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2322                 if (status != BNXT_FW_STATUS_HEALTHY)
2323                         status_desc = "unhealthy";
2324                 netif_info(bp, drv, bp->dev,
2325                            "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2326                            fw_health->primary ? "primary" : "backup", status,
2327                            status_desc, fw_health->last_fw_reset_cnt);
2328                 if (!fw_health->enabled) {
2329                         /* Make sure tmr_counter is set and visible to
2330                          * bnxt_health_check() before setting enabled to true.
2331                          */
2332                         smp_wmb();
2333                         fw_health->enabled = true;
2334                 }
2335                 goto async_event_process_exit;
2336         }
2337         case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2338                 netif_notice(bp, hw, bp->dev,
2339                              "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2340                              data1, data2);
2341                 goto async_event_process_exit;
2342         case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2343                 struct bnxt_rx_ring_info *rxr;
2344                 u16 grp_idx;
2345
2346                 if (bp->flags & BNXT_FLAG_CHIP_P5)
2347                         goto async_event_process_exit;
2348
2349                 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2350                             BNXT_EVENT_RING_TYPE(data2), data1);
2351                 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2352                         goto async_event_process_exit;
2353
2354                 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2355                 if (grp_idx == INVALID_HW_RING_ID) {
2356                         netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2357                                     data1);
2358                         goto async_event_process_exit;
2359                 }
2360                 rxr = bp->bnapi[grp_idx]->rx_ring;
2361                 bnxt_sched_reset(bp, rxr);
2362                 goto async_event_process_exit;
2363         }
2364         case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2365                 struct bnxt_fw_health *fw_health = bp->fw_health;
2366
2367                 netif_notice(bp, hw, bp->dev,
2368                              "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2369                              data1, data2);
2370                 if (fw_health) {
2371                         fw_health->echo_req_data1 = data1;
2372                         fw_health->echo_req_data2 = data2;
2373                         set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2374                         break;
2375                 }
2376                 goto async_event_process_exit;
2377         }
2378         case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2379                 bnxt_ptp_pps_event(bp, data1, data2);
2380                 goto async_event_process_exit;
2381         }
2382         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2383                 bnxt_event_error_report(bp, data1, data2);
2384                 goto async_event_process_exit;
2385         }
2386         case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2387                 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2388                 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2389                         if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
2390                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2391                                 u64 ns;
2392
2393                                 spin_lock_bh(&ptp->ptp_lock);
2394                                 bnxt_ptp_update_current_time(bp);
2395                                 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2396                                        BNXT_PHC_BITS) | ptp->current_time);
2397                                 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2398                                 spin_unlock_bh(&ptp->ptp_lock);
2399                         }
2400                         break;
2401                 }
2402                 goto async_event_process_exit;
2403         }
2404         case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2405                 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2406
2407                 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2408                 goto async_event_process_exit;
2409         }
2410         default:
2411                 goto async_event_process_exit;
2412         }
2413         bnxt_queue_sp_work(bp);
2414 async_event_process_exit:
2415         bnxt_ulp_async_events(bp, cmpl);
2416         return 0;
2417 }
2418
2419 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2420 {
2421         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2422         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2423         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2424                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2425
2426         switch (cmpl_type) {
2427         case CMPL_BASE_TYPE_HWRM_DONE:
2428                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2429                 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2430                 break;
2431
2432         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2433                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2434
2435                 if ((vf_id < bp->pf.first_vf_id) ||
2436                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2437                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2438                                    vf_id);
2439                         return -EINVAL;
2440                 }
2441
2442                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2443                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2444                 bnxt_queue_sp_work(bp);
2445                 break;
2446
2447         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2448                 bnxt_async_event_process(bp,
2449                                          (struct hwrm_async_event_cmpl *)txcmp);
2450                 break;
2451
2452         default:
2453                 break;
2454         }
2455
2456         return 0;
2457 }
2458
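/* MSI-X handler: one vector per completion ring.  Count the event, prefetch
 * the next completion entry while scheduling NAPI, and leave all real work
 * to the NAPI poll handler.
 */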
2459 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2460 {
2461         struct bnxt_napi *bnapi = dev_instance;
2462         struct bnxt *bp = bnapi->bp;
2463         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2464         u32 cons = RING_CMP(cpr->cp_raw_cons);
2465
2466         cpr->event_ctr++;
2467         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2468         napi_schedule(&bnapi->napi);
2469         return IRQ_HANDLED;
2470 }
2471
2472 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2473 {
2474         u32 raw_cons = cpr->cp_raw_cons;
2475         u16 cons = RING_CMP(raw_cons);
2476         struct tx_cmp *txcmp;
2477
2478         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2479
2480         return TX_CMP_VALID(txcmp, raw_cons);
2481 }
2482
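/* Legacy INTx handler.  The line may be shared, so when no work is pending,
 * check the legacy interrupt status register to confirm the interrupt is
 * ours before disabling the ring IRQ and scheduling NAPI.
 */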
2483 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2484 {
2485         struct bnxt_napi *bnapi = dev_instance;
2486         struct bnxt *bp = bnapi->bp;
2487         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2488         u32 cons = RING_CMP(cpr->cp_raw_cons);
2489         u32 int_status;
2490
2491         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2492
2493         if (!bnxt_has_work(bp, cpr)) {
2494                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2495                 /* return if erroneous interrupt */
2496                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2497                         return IRQ_NONE;
2498         }
2499
2500         /* disable ring IRQ */
2501         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2502
2503         /* Return here if interrupt is shared and is disabled. */
2504         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2505                 return IRQ_HANDLED;
2506
2507         napi_schedule(&bnapi->napi);
2508         return IRQ_HANDLED;
2509 }
2510
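/* Core completion ring processing shared by the NAPI poll handlers.  TX and
 * RX completions are interleaved on the same ring: TX completions are only
 * counted here (buffer cleanup is deferred to __bnxt_poll_work_done()),
 * while RX completions are handled immediately, stopping once the budget is
 * consumed or a not-yet-valid entry is reached.
 */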
2511 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2512                             int budget)
2513 {
2514         struct bnxt_napi *bnapi = cpr->bnapi;
2515         u32 raw_cons = cpr->cp_raw_cons;
2516         u32 cons;
2517         int tx_pkts = 0;
2518         int rx_pkts = 0;
2519         u8 event = 0;
2520         struct tx_cmp *txcmp;
2521
2522         cpr->has_more_work = 0;
2523         cpr->had_work_done = 1;
2524         while (1) {
2525                 int rc;
2526
2527                 cons = RING_CMP(raw_cons);
2528                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2529
2530                 if (!TX_CMP_VALID(txcmp, raw_cons))
2531                         break;
2532
2533                 /* The validity of the entry must be checked before
2534                  * reading any further.
2535                  */
2536                 dma_rmb();
2537                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2538                         tx_pkts++;
2539                         /* return full budget so NAPI will complete. */
2540                         if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2541                                 rx_pkts = budget;
2542                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2543                                 if (budget)
2544                                         cpr->has_more_work = 1;
2545                                 break;
2546                         }
2547                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2548                         if (likely(budget))
2549                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2550                         else
2551                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2552                                                            &event);
2553                         if (likely(rc >= 0))
2554                                 rx_pkts += rc;
2555                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2556                          * the NAPI budget.  Otherwise, we may potentially loop
2557                          * here forever if we consistently cannot allocate
2558                          * buffers.
2559                          */
2560                         else if (rc == -ENOMEM && budget)
2561                                 rx_pkts++;
2562                         else if (rc == -EBUSY)  /* partial completion */
2563                                 break;
2564                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2565                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2566                                     (TX_CMP_TYPE(txcmp) ==
2567                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2568                                     (TX_CMP_TYPE(txcmp) ==
2569                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2570                         bnxt_hwrm_handler(bp, txcmp);
2571                 }
2572                 raw_cons = NEXT_RAW_CMP(raw_cons);
2573
2574                 if (rx_pkts && rx_pkts == budget) {
2575                         cpr->has_more_work = 1;
2576                         break;
2577                 }
2578         }
2579
2580         if (event & BNXT_REDIRECT_EVENT)
2581                 xdp_do_flush();
2582
2583         if (event & BNXT_TX_EVENT) {
2584                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2585                 u16 prod = txr->tx_prod;
2586
2587                 /* Sync BD data before updating doorbell */
2588                 wmb();
2589
2590                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2591         }
2592
2593         cpr->cp_raw_cons = raw_cons;
2594         bnapi->tx_pkts += tx_pkts;
2595         bnapi->events |= event;
2596         return rx_pkts;
2597 }

static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	if (bnapi->tx_pkts) {
		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
		bnapi->tx_pkts = 0;
	}

	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
	}
	if (bnapi->events & BNXT_AGG_EVENT) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
	}
	bnapi->events = 0;
}
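
/* Editor's note: the RX/AGG producer doorbells above are batched once
 * per poll instead of once per packet.  The sequence (see
 * bnxt_poll_work() below) is:
 *
 *	__bnxt_poll_work()	- fill rings, record bnapi->events
 *	bnxt_db_cq()		- ACK the completion ring first
 *	__bnxt_poll_work_done()	- one rx_db/rx_agg_db write each
 */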

static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			  int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	int rx_pkts;

	rx_pkts = __bnxt_poll_work(bp, cpr, budget);

	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);

	__bnxt_poll_work_done(bp, bnapi);
	return rx_pkts;
}

static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 rx_pkts = 0;
	u8 event = 0;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
			if (likely(rc == -EIO) && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);

	if (event & BNXT_AGG_EVENT)
		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete_done(napi, rx_pkts);
		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
	}
	return rx_pkts;
}

static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
		napi_complete(napi);
		return 0;
	}
	while (1) {
		work_done += bnxt_poll_work(bp, cpr, budget - work_done);

		if (work_done >= budget) {
			if (!budget)
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}
	}
	if (bp->flags & BNXT_FLAG_DIM) {
		struct dim_sample dim_sample = {};

		dim_update_sample(cpr->event_ctr,
				  cpr->rx_packets,
				  cpr->rx_bytes,
				  &dim_sample);
		net_dim(&cpr->dim, dim_sample);
	}
	return work_done;
}
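
/* Illustrative flow (editor's sketch, not driver code): a typical
 * bnxt_poll() pass with budget = 64 might run
 *
 *	work_done = bnxt_poll_work(bp, cpr, 64);   // e.g. returns 40
 *	bnxt_has_work() == false                   // ring drained
 *	napi_complete_done(napi, 40)               // leave polling
 *	BNXT_DB_CQ_ARM()                           // re-enable the IRQ
 *
 * If completions race in before napi_complete_done() finishes, it
 * returns false, the doorbell is not armed, and NAPI keeps polling
 * rather than losing the event.
 */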

static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i, work_done = 0;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];

		if (cpr2) {
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work |= cpr2->has_more_work;
		}
	}
	return work_done;
}

static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
				 u64 dbr_type)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
		struct bnxt_db_info *db;

		if (cpr2 && cpr2->had_work_done) {
			db = &cpr2->cp_db;
			bnxt_writeq(bp, db->db_key64 | dbr_type |
				    RING_CMP(cpr2->cp_raw_cons), db->doorbell);
			cpr2->had_work_done = 0;
		}
	}
	__bnxt_poll_work_done(bp, bnapi);
}

static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_cp_ring_info *cpr_rx;
	u32 raw_cons = cpr->cp_raw_cons;
	struct bnxt *bp = bnapi->bp;
	struct nqe_cn *nqcmp;
	int work_done = 0;
	u32 cons;

	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
		napi_complete(napi);
		return 0;
	}
	if (cpr->has_more_work) {
		cpr->has_more_work = 0;
		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
	}
	while (1) {
		cons = RING_CMP(raw_cons);
		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
			if (cpr->has_more_work)
				break;

			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
			cpr->cp_raw_cons = raw_cons;
			if (napi_complete_done(napi, work_done))
				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
						  cpr->cp_raw_cons);
			goto poll_done;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
			struct bnxt_cp_ring_info *cpr2;

			/* No more budget for RX work */
			if (budget && work_done >= budget && idx == BNXT_RX_HDL)
				break;

			cpr2 = cpr->cp_ring_arr[idx];
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work |= cpr2->has_more_work;
		} else {
			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
	if (raw_cons != cpr->cp_raw_cons) {
		cpr->cp_raw_cons = raw_cons;
		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
	}
poll_done:
	cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
	if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
		struct dim_sample dim_sample = {};

		dim_update_sample(cpr->event_ctr,
				  cpr_rx->rx_packets,
				  cpr_rx->rx_bytes,
				  &dim_sample);
		net_dim(&cpr->dim, dim_sample);
	}
	return work_done;
}
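
/* Editor's note: on P5 chips the NQ processed above carries only
 * notifications.  A CQ_NOTIFICATION whose cq_handle_low is BNXT_RX_HDL
 * or BNXT_TX_HDL directs the poll to the matching entry in
 * cpr->cp_ring_arr[], and __bnxt_poll_cqs_done() later rings each
 * sub-ring doorbell with DBR_TYPE_CQ (keep polling) or
 * DBR_TYPE_CQ_ARMALL (re-arm) as appropriate.
 */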

static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		if (!txr->tx_buf_ring)
			continue;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb;
			int k, last;

			if (i < bp->tx_nr_rings_xdp &&
			    tx_buf->action == XDP_REDIRECT) {
				dma_unmap_single(&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					dma_unmap_len(tx_buf, len),
					DMA_TO_DEVICE);
				xdp_return_frame(tx_buf->xdpf);
				tx_buf->action = 0;
				tx_buf->xdpf = NULL;
				j++;
				continue;
			}

			skb = tx_buf->skb;
			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), DMA_TO_DEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
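
/* Slot arithmetic used above, shown for one hypothetical packet: a
 * normal TX packet consumes the first BD plus an extension BD, hence
 * "j += 2", followed by one BD per page fragment:
 *
 *	slot j        - bnxt_sw_tx_bd holding the skb and head mapping
 *	slot j + 1    - extension BD, no skb attached
 *	slots j + 2.. - one per skb_shinfo(skb)->frags[k]
 *
 * "j & bp->tx_ring_mask" wraps the index across the ring boundary.
 */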

static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tpa_idx_map *map;
	int i, max_idx, max_agg_idx;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	if (!rxr->rx_tpa)
		goto skip_rx_tpa_free;

	for (i = 0; i < bp->max_tpa; i++) {
		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
		u8 *data = tpa_info->data;

		if (!data)
			continue;

		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		tpa_info->data = NULL;

		skb_free_frag(data);
	}

skip_rx_tpa_free:
	if (!rxr->rx_buf_ring)
		goto skip_rx_buf_free;

	for (i = 0; i < max_idx; i++) {
		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
		dma_addr_t mapping = rx_buf->mapping;
		void *data = rx_buf->data;

		if (!data)
			continue;

		rx_buf->data = NULL;
		if (BNXT_RX_PAGE_MODE(bp)) {
			mapping -= bp->rx_dma_offset;
			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
					     bp->rx_dir,
					     DMA_ATTR_WEAK_ORDERING);
			page_pool_recycle_direct(rxr->page_pool, data);
		} else {
			dma_unmap_single_attrs(&pdev->dev, mapping,
					       bp->rx_buf_use_size, bp->rx_dir,
					       DMA_ATTR_WEAK_ORDERING);
			skb_free_frag(data);
		}
	}

skip_rx_buf_free:
	if (!rxr->rx_agg_ring)
		goto skip_rx_agg_free;

	for (i = 0; i < max_agg_idx; i++) {
		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
		struct page *page = rx_agg_buf->page;

		if (!page)
			continue;

		if (BNXT_RX_PAGE_MODE(bp)) {
			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
					     DMA_ATTR_WEAK_ORDERING);
			rx_agg_buf->page = NULL;
			__clear_bit(i, rxr->rx_agg_bmap);

			page_pool_recycle_direct(rxr->page_pool, page);
		} else {
			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
					     DMA_ATTR_WEAK_ORDERING);
			rx_agg_buf->page = NULL;
			__clear_bit(i, rxr->rx_agg_bmap);

			__free_page(page);
		}
	}

skip_rx_agg_free:
	if (rxr->rx_page) {
		__free_page(rxr->rx_page);
		rxr->rx_page = NULL;
	}
	map = rxr->rx_tpa_idx_map;
	if (map)
		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}

static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++)
		bnxt_free_one_rx_ring_skbs(bp, i);
}

static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}

static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
{
	u8 init_val = mem_init->init_val;
	u16 offset = mem_init->offset;
	u8 *p2 = p;
	int i;

	if (!init_val)
		return;
	if (offset == BNXT_MEM_INVALID_OFFSET) {
		memset(p, init_val, len);
		return;
	}
	for (i = 0; i < len; i += mem_init->size)
		*(p2 + i + offset) = init_val;
}
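
/* Worked example (editor's illustration): with mem_init->size == 32,
 * offset == 16 and init_val == 0xff, a 128-byte block gets bytes 16,
 * 48, 80 and 112 set to 0xff -- one marker byte per 32-byte context
 * entry -- whereas offset == BNXT_MEM_INVALID_OFFSET would memset()
 * the whole block instead.
 */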

static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	if (!rmem->pg_arr)
		goto skip_pages;

	for (i = 0; i < rmem->nr_pages; i++) {
		if (!rmem->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, rmem->page_size,
				  rmem->pg_arr[i], rmem->dma_arr[i]);

		rmem->pg_arr[i] = NULL;
	}
skip_pages:
	if (rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		dma_free_coherent(&pdev->dev, pg_tbl_size,
				  rmem->pg_tbl, rmem->pg_tbl_map);
		rmem->pg_tbl = NULL;
	}
	if (rmem->vmem_size && *rmem->vmem) {
		vfree(*rmem->vmem);
		*rmem->vmem = NULL;
	}
}

static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	u64 valid_bit = 0;
	int i;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
						  &rmem->pg_tbl_map,
						  GFP_KERNEL);
		if (!rmem->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		u64 extra_bits = valid_bit;

		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     rmem->page_size,
						     &rmem->dma_arr[i],
						     GFP_KERNEL);
		if (!rmem->pg_arr[i])
			return -ENOMEM;

		if (rmem->mem_init)
			bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
					  rmem->page_size);
		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;
			rmem->pg_tbl[i] =
				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = vzalloc(rmem->vmem_size);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
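
/* Editor's sketch of the layout built above for a multi-page ring:
 *
 *	rmem->pg_tbl -> [PTE0 | PTE1 | ...]	(one 64-bit entry/page)
 *	PTEi = dma_arr[i] | PTU_PTE_VALID
 *	       (plus PTU_PTE_NEXT_TO_LAST/PTU_PTE_LAST on the final
 *	       entries when BNXT_RMEM_RING_PTE_FLAG is set)
 *
 * Single-page, depth-0 rings skip the page table and use pg_arr[0]
 * directly.
 */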

static void bnxt_free_tpa_info(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

		kfree(rxr->rx_tpa_idx_map);
		rxr->rx_tpa_idx_map = NULL;
		if (rxr->rx_tpa) {
			kfree(rxr->rx_tpa[0].agg_arr);
			rxr->rx_tpa[0].agg_arr = NULL;
		}
		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;
	}
}

static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
	int i, j, total_aggs = 0;

	bp->max_tpa = MAX_TPA;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (!bp->max_tpa_v2)
			return 0;
		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
		total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct rx_agg_cmp *agg;

		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
				      GFP_KERNEL);
		if (!rxr->rx_tpa)
			return -ENOMEM;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;
		agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
		rxr->rx_tpa[0].agg_arr = agg;
		if (!agg)
			return -ENOMEM;
		for (j = 1; j < bp->max_tpa; j++)
			rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
					      GFP_KERNEL);
		if (!rxr->rx_tpa_idx_map)
			return -ENOMEM;
	}
	return 0;
}
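
/* Editor's note on the slicing above: one contiguous kcalloc() of
 * max_tpa * MAX_SKB_FRAGS aggregation entries is carved up so that
 * rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS, giving every TPA
 * context room for a full set of aggregation completions without a
 * per-context allocation.
 */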

static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	bnxt_free_tpa_info(bp);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		if (rxr->xdp_prog)
			bpf_prog_put(rxr->xdp_prog);

		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
			xdp_rxq_info_unreg(&rxr->xdp_rxq);

		page_pool_destroy(rxr->page_pool);
		rxr->page_pool = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);
	}
}

static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr)
{
	struct page_pool_params pp = { 0 };

	pp.pool_size = bp->rx_ring_size;
	pp.nid = dev_to_node(&bp->pdev->dev);
	pp.dev = &bp->pdev->dev;
	pp.dma_dir = DMA_BIDIRECTIONAL;

	rxr->page_pool = page_pool_create(&pp);
	if (IS_ERR(rxr->page_pool)) {
		int err = PTR_ERR(rxr->page_pool);

		rxr->page_pool = NULL;
		return err;
	}
	return 0;
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc = 0, agg_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = bnxt_alloc_rx_page_pool(bp, rxr);
		if (rc)
			return rc;

		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
		if (rc < 0)
			return rc;

		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
						MEM_TYPE_PAGE_POOL,
						rxr->page_pool);
		if (rc) {
			xdp_rxq_info_unreg(&rxr->xdp_rxq);
			return rc;
		}

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = i;
		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
			if (rc)
				return rc;

			ring->grp_idx = i;
			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;
		}
	}
	if (bp->flags & BNXT_FLAG_TPA)
		rc = bnxt_alloc_tpa_info(bp);
	return rc;
}
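
/* Editor's note: rx_agg_bmap_size above is a bit count (ring mask + 1),
 * so the kzalloc() converts it to bytes with "/ 8"; e.g. a 4096-entry
 * aggregation ring needs a 512-byte bitmap to track in-use buffers.
 */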

static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);
	}
}

static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u8 qidx;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = txr->bnapi->index;
		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);
		}
		qidx = bp->tc_to_qidx[j];
		ring->queue_id = bp->q_info[qidx].queue_id;
		spin_lock_init(&txr->xdp_tx_lock);
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}
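
/* TX push buffer layout allocated above (editor's sketch):
 *
 *	txr->tx_push       -> struct tx_push_bd (BDs written first)
 *	txr->data_mapping  -> packet bytes copied right after the BDs
 *
 * The whole buffer, BDs included, is capped at 256 bytes; a larger
 * aligned size disables push mode (bp->tx_push_thresh = 0).
 */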

static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

	kfree(cpr->cp_desc_ring);
	cpr->cp_desc_ring = NULL;
	ring->ring_mem.pg_arr = NULL;
	kfree(cpr->cp_desc_mapping);
	cpr->cp_desc_mapping = NULL;
	ring->ring_mem.dma_arr = NULL;
}

static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
{
	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
	if (!cpr->cp_desc_ring)
		return -ENOMEM;
	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
				       GFP_KERNEL);
	if (!cpr->cp_desc_mapping)
		return -ENOMEM;
	return 0;
}

static void bnxt_free_all_cp_arrays(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		if (!bnapi)
			continue;
		bnxt_free_cp_arrays(&bnapi->cp_ring);
	}
}

static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
{
	int i, n = bp->cp_nr_pages;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		int rc;

		if (!bnapi)
			continue;
		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
		if (rc)
			return rc;
	}
	return 0;
}

static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;
		int j;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				bnxt_free_ring(bp, &ring->ring_mem);
				bnxt_free_cp_arrays(cpr2);
				kfree(cpr2);
				cpr->cp_ring_arr[j] = NULL;
			}
		}
	}
}

static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
{
	struct bnxt_ring_mem_info *rmem;
	struct bnxt_ring_struct *ring;
	struct bnxt_cp_ring_info *cpr;
	int rc;

	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
	if (!cpr)
		return NULL;

	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
	if (rc) {
		bnxt_free_cp_arrays(cpr);
		kfree(cpr);
		return NULL;
	}
	ring = &cpr->cp_ring_struct;
	rmem = &ring->ring_mem;
	rmem->nr_pages = bp->cp_nr_pages;
	rmem->page_size = HW_CMPD_RING_SIZE;
	rmem->pg_arr = (void **)cpr->cp_desc_ring;
	rmem->dma_arr = cpr->cp_desc_mapping;
	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
	rc = bnxt_alloc_ring(bp, rmem);
	if (rc) {
		bnxt_free_ring(bp, rmem);
		bnxt_free_cp_arrays(cpr);
		kfree(cpr);
		cpr = NULL;
	}
	return cpr;
}

static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
	int i, rc, ulp_base_vec, ulp_msix;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->bnapi = bnapi;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		if (ulp_msix && i >= ulp_base_vec)
			ring->map_idx = i + ulp_msix;
		else
			ring->map_idx = i;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (i < bp->rx_nr_rings) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
		if ((sh && i < bp->tx_nr_rings) ||
		    (!sh && i >= bp->rx_nr_rings)) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
	}
	return 0;
}

static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_ring_mem_info *rmem;
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->cp_nr_pages;
		rmem->page_size = HW_CMPD_RING_SIZE;
		rmem->pg_arr = (void **)cpr->cp_desc_ring;
		rmem->dma_arr = cpr->cp_desc_mapping;
		rmem->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_desc_ring;
		rmem->dma_arr = rxr->rx_desc_mapping;
		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		rmem->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_agg_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
		rmem->dma_arr = rxr->rx_agg_desc_mapping;
		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		rmem->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->tx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)txr->tx_desc_ring;
		rmem->dma_arr = txr->tx_desc_mapping;
		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		rmem->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}
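
/* Editor's illustration: every RX BD is stamped with the buffer
 * length/type and its absolute ring position in rx_bd_opaque, so a
 * completion can name the exact software slot that produced it, e.g.
 * page 1, entry 3 -> opaque RX_DESC_CNT + 3.
 */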

static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	struct net_device *dev = bp->dev;
	u32 prod;
	int i;

	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (rxr->rx_tpa) {
		dma_addr_t mapping;
		u8 *data;

		for (i = 0; i < bp->max_tpa; i++) {
			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
			if (!data)
				return -ENOMEM;

			rxr->rx_tpa[i].data = data;
			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
			rxr->rx_tpa[i].mapping = mapping;
		}
	}
	return 0;
}

static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 type;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		bpf_prog_add(bp->xdp_prog, 1);
		rxr->xdp_prog = bp->xdp_prog;
	}
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

		bnxt_init_rxbd_pages(ring, type);
	}

	return bnxt_alloc_one_rx_ring(bp, ring_nr);
}

static void bnxt_init_cp_rings(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (!cpr2)
				continue;

			ring = &cpr2->cp_ring_struct;
			ring->fw_ring_id = INVALID_HW_RING_ID;
			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		}
	}
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   BNXT_MIN_TX_DESC_CNT);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				get_random_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}

static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}
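
/* Worked example (editor's): bnxt_calc_nr_ring_pages(2000, 512) gives
 * 2000 / 512 = 3, incremented to 4, already a power of two, so 4
 * pages.  An exact multiple such as 2048 / 512 = 4 still grows to 8
 * pages because of the unconditional increment, which appears intended
 * to keep the ring from ever being completely full.
 */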

void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		return;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	else if (bp->dev->features & NETIF_F_GRO_HW)
		bp->flags |= BNXT_FLAG_GRO;
}

/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	if (agg_factor) {
		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
				    bp->rx_ring_size, ring_size);
			bp->rx_ring_size = ring_size;
		}
		agg_ring_size = ring_size * agg_factor;

		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;

		if (BNXT_RX_PAGE_MODE(bp)) {
			rx_space = BNXT_PAGE_MODE_BUF_SIZE;
			rx_size = BNXT_MAX_PAGE_MODE_MTU;
		} else {
			rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
			rx_space = rx_size + NET_SKB_PAD +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		}
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	max_rx_cmpl = bp->rx_ring_size;
	/* MAX TPA needs to be added because TPA_START completions are
	 * immediately recycled, so the TPA completions are not bound by
	 * the RX ring size.
	 */
	if (bp->flags & BNXT_FLAG_TPA)
		max_rx_cmpl += bp->max_tpa;
	/* RX and TPA completions are 32-byte, all others are 16-byte */
	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}
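
/* Editor's example of the completion-ring sizing above: RX and TPA
 * completions are 32 bytes (two 16-byte slots each), so with
 * rx_ring_size = 511, tx_ring_size = 511, TPA adding max_tpa to
 * max_rx_cmpl and agg_ring_size = 4 * 511 = 2044:
 *
 *	cp_ring_size = max_rx_cmpl * 2 + 2044 + 511
 *
 * and bnxt_calc_nr_ring_pages() rounds that up to a power-of-two
 * number of CP_DESC_CNT-entry pages.
 */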

/* Changing allocation mode of RX rings.
 * TODO: Update when extending xdp_rxq_info to support allocation modes.
 */
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
	if (page_mode) {
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;

		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
			bp->flags |= BNXT_FLAG_JUMBO;
			bp->rx_skb_func = bnxt_rx_multi_page_skb;
			bp->dev->max_mtu =
				min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
		} else {
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->rx_skb_func = bnxt_rx_page_skb;
			bp->dev->max_mtu =
				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
		}
		bp->rx_dir = DMA_BIDIRECTIONAL;
		/* Disable LRO or GRO_HW */
		netdev_update_features(bp->dev);
	} else {
		bp->dev->max_mtu = bp->max_mtu;
		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_FROM_DEVICE;
		bp->rx_skb_func = bnxt_rx_skb;
	}
	return 0;
}

static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	int i;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}

static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	int i, rc = 0, size;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;
	int max_rings;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			if (mem_size > 0) {
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
				if (!vnic->uc_list) {
					rc = -ENOMEM;
					goto out;
				}
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			goto vnic_skip_grps;

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}
vnic_skip_grps:
		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
			continue;

		/* Allocate rss table and hash key */
		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);

		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
4144                 }
4145
4146                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4147                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4148         }
4149         return 0;
4150
4151 out:
4152         return rc;
4153 }
4154
4155 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4156 {
4157         struct bnxt_hwrm_wait_token *token;
4158
4159         dma_pool_destroy(bp->hwrm_dma_pool);
4160         bp->hwrm_dma_pool = NULL;
4161
4162         rcu_read_lock();
4163         hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4164                 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4165         rcu_read_unlock();
4166 }
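
/* Tokens marked BNXT_HWRM_CANCELLED above let any callers still waiting on
 * token->state bail out cleanly now that the DMA pool has been destroyed.
 */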
4167
4168 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4169 {
4170         bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4171                                             BNXT_HWRM_DMA_SIZE,
4172                                             BNXT_HWRM_DMA_ALIGN, 0);
4173         if (!bp->hwrm_dma_pool)
4174                 return -ENOMEM;
4175
4176         INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4177
4178         return 0;
4179 }
4180
4181 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4182 {
4183         kfree(stats->hw_masks);
4184         stats->hw_masks = NULL;
4185         kfree(stats->sw_stats);
4186         stats->sw_stats = NULL;
4187         if (stats->hw_stats) {
4188                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4189                                   stats->hw_stats_map);
4190                 stats->hw_stats = NULL;
4191         }
4192 }
4193
4194 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4195                                 bool alloc_masks)
4196 {
4197         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4198                                              &stats->hw_stats_map, GFP_KERNEL);
4199         if (!stats->hw_stats)
4200                 return -ENOMEM;
4201
4202         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4203         if (!stats->sw_stats)
4204                 goto stats_mem_err;
4205
4206         if (alloc_masks) {
4207                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4208                 if (!stats->hw_masks)
4209                         goto stats_mem_err;
4210         }
4211         return 0;
4212
4213 stats_mem_err:
4214         bnxt_free_stats_mem(bp, stats);
4215         return -ENOMEM;
4216 }
4217
4218 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4219 {
4220         int i;
4221
4222         for (i = 0; i < count; i++)
4223                 mask_arr[i] = mask;
4224 }
4225
4226 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4227 {
4228         int i;
4229
4230         for (i = 0; i < count; i++)
4231                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4232 }
4233
4234 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4235                                     struct bnxt_stats_mem *stats)
4236 {
4237         struct hwrm_func_qstats_ext_output *resp;
4238         struct hwrm_func_qstats_ext_input *req;
4239         __le64 *hw_masks;
4240         int rc;
4241
4242         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4243             !(bp->flags & BNXT_FLAG_CHIP_P5))
4244                 return -EOPNOTSUPP;
4245
4246         rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4247         if (rc)
4248                 return rc;
4249
4250         req->fid = cpu_to_le16(0xffff);
4251         req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4252
4253         resp = hwrm_req_hold(bp, req);
4254         rc = hwrm_req_send(bp, req);
4255         if (!rc) {
4256                 hw_masks = &resp->rx_ucast_pkts;
4257                 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4258         }
4259         hwrm_req_drop(bp, req);
4260         return rc;
4261 }
4262
4263 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4264 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4265
4266 static void bnxt_init_stats(struct bnxt *bp)
4267 {
4268         struct bnxt_napi *bnapi = bp->bnapi[0];
4269         struct bnxt_cp_ring_info *cpr;
4270         struct bnxt_stats_mem *stats;
4271         __le64 *rx_stats, *tx_stats;
4272         int rc, rx_count, tx_count;
4273         u64 *rx_masks, *tx_masks;
4274         u64 mask;
4275         u8 flags;
4276
4277         cpr = &bnapi->cp_ring;
4278         stats = &cpr->stats;
4279         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4280         if (rc) {
4281                 if (bp->flags & BNXT_FLAG_CHIP_P5)
4282                         mask = (1ULL << 48) - 1;
4283                 else
4284                         mask = -1ULL;
4285                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4286         }
4287         if (bp->flags & BNXT_FLAG_PORT_STATS) {
4288                 stats = &bp->port_stats;
4289                 rx_stats = stats->hw_stats;
4290                 rx_masks = stats->hw_masks;
4291                 rx_count = sizeof(struct rx_port_stats) / 8;
4292                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4293                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4294                 tx_count = sizeof(struct tx_port_stats) / 8;
4295
4296                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4297                 rc = bnxt_hwrm_port_qstats(bp, flags);
4298                 if (rc) {
4299                         mask = (1ULL << 40) - 1;
4300
4301                         bnxt_fill_masks(rx_masks, mask, rx_count);
4302                         bnxt_fill_masks(tx_masks, mask, tx_count);
4303                 } else {
4304                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4305                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4306                         bnxt_hwrm_port_qstats(bp, 0);
4307                 }
4308         }
4309         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4310                 stats = &bp->rx_port_stats_ext;
4311                 rx_stats = stats->hw_stats;
4312                 rx_masks = stats->hw_masks;
4313                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4314                 stats = &bp->tx_port_stats_ext;
4315                 tx_stats = stats->hw_stats;
4316                 tx_masks = stats->hw_masks;
4317                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4318
4319                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4320                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4321                 if (rc) {
4322                         mask = (1ULL << 40) - 1;
4323
4324                         bnxt_fill_masks(rx_masks, mask, rx_count);
4325                         if (tx_stats)
4326                                 bnxt_fill_masks(tx_masks, mask, tx_count);
4327                 } else {
4328                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4329                         if (tx_stats)
4330                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4331                                                    tx_count);
4332                         bnxt_hwrm_port_qstats_ext(bp, 0);
4333                 }
4334         }
4335 }
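
/* How the masks set up above are typically consumed (an illustrative
 * sketch, not a function in this file): a hardware counter of width W
 * wraps at 2^W, so with mask = (1ULL << W) - 1 an accumulated software
 * counter stays correct as long as the hardware counter wraps at most
 * once between reads:
 *
 *	delta = (hw_now - hw_prev) & mask;
 *	sw_total += delta;
 */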
4336
4337 static void bnxt_free_port_stats(struct bnxt *bp)
4338 {
4339         bp->flags &= ~BNXT_FLAG_PORT_STATS;
4340         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4341
4342         bnxt_free_stats_mem(bp, &bp->port_stats);
4343         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4344         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4345 }
4346
4347 static void bnxt_free_ring_stats(struct bnxt *bp)
4348 {
4349         int i;
4350
4351         if (!bp->bnapi)
4352                 return;
4353
4354         for (i = 0; i < bp->cp_nr_rings; i++) {
4355                 struct bnxt_napi *bnapi = bp->bnapi[i];
4356                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4357
4358                 bnxt_free_stats_mem(bp, &cpr->stats);
4359         }
4360 }
4361
4362 static int bnxt_alloc_stats(struct bnxt *bp)
4363 {
4364         u32 size, i;
4365         int rc;
4366
4367         size = bp->hw_ring_stats_size;
4368
4369         for (i = 0; i < bp->cp_nr_rings; i++) {
4370                 struct bnxt_napi *bnapi = bp->bnapi[i];
4371                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4372
4373                 cpr->stats.len = size;
4374                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4375                 if (rc)
4376                         return rc;
4377
4378                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4379         }
4380
4381         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4382                 return 0;
4383
4384         if (bp->port_stats.hw_stats)
4385                 goto alloc_ext_stats;
4386
4387         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4388         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4389         if (rc)
4390                 return rc;
4391
4392         bp->flags |= BNXT_FLAG_PORT_STATS;
4393
4394 alloc_ext_stats:
4395         /* Display extended statistics only if the FW supports them */
4396         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4397                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4398                         return 0;
4399
4400         if (bp->rx_port_stats_ext.hw_stats)
4401                 goto alloc_tx_ext_stats;
4402
4403         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4404         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4405         /* Extended stats are optional */
4406         if (rc)
4407                 return 0;
4408
4409 alloc_tx_ext_stats:
4410         if (bp->tx_port_stats_ext.hw_stats)
4411                 return 0;
4412
4413         if (bp->hwrm_spec_code >= 0x10902 ||
4414             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4415                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4416                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4417                 /* Extended stats are optional */
4418                 if (rc)
4419                         return 0;
4420         }
4421         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4422         return 0;
4423 }
4424
4425 static void bnxt_clear_ring_indices(struct bnxt *bp)
4426 {
4427         int i;
4428
4429         if (!bp->bnapi)
4430                 return;
4431
4432         for (i = 0; i < bp->cp_nr_rings; i++) {
4433                 struct bnxt_napi *bnapi = bp->bnapi[i];
4434                 struct bnxt_cp_ring_info *cpr;
4435                 struct bnxt_rx_ring_info *rxr;
4436                 struct bnxt_tx_ring_info *txr;
4437
4438                 if (!bnapi)
4439                         continue;
4440
4441                 cpr = &bnapi->cp_ring;
4442                 cpr->cp_raw_cons = 0;
4443
4444                 txr = bnapi->tx_ring;
4445                 if (txr) {
4446                         txr->tx_prod = 0;
4447                         txr->tx_cons = 0;
4448                 }
4449
4450                 rxr = bnapi->rx_ring;
4451                 if (rxr) {
4452                         rxr->rx_prod = 0;
4453                         rxr->rx_agg_prod = 0;
4454                         rxr->rx_sw_agg_prod = 0;
4455                         rxr->rx_next_cons = 0;
4456                 }
4457         }
4458 }
4459
4460 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4461 {
4462 #ifdef CONFIG_RFS_ACCEL
4463         int i;
4464
4465         /* We are under rtnl_lock and all our NAPIs have been disabled,
4466          * so it is safe to delete the hash table.
4467          */
4468         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4469                 struct hlist_head *head;
4470                 struct hlist_node *tmp;
4471                 struct bnxt_ntuple_filter *fltr;
4472
4473                 head = &bp->ntp_fltr_hash_tbl[i];
4474                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4475                         hlist_del(&fltr->hash);
4476                         kfree(fltr);
4477                 }
4478         }
4479         if (irq_reinit) {
4480                 bitmap_free(bp->ntp_fltr_bmap);
4481                 bp->ntp_fltr_bmap = NULL;
4482         }
4483         bp->ntp_fltr_count = 0;
4484 #endif
4485 }
4486
4487 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4488 {
4489 #ifdef CONFIG_RFS_ACCEL
4490         int i, rc = 0;
4491
4492         if (!(bp->flags & BNXT_FLAG_RFS))
4493                 return 0;
4494
4495         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4496                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4497
4498         bp->ntp_fltr_count = 0;
4499         bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
4500
4501         if (!bp->ntp_fltr_bmap)
4502                 rc = -ENOMEM;
4503
4504         return rc;
4505 #else
4506         return 0;
4507 #endif
4508 }
4509
4510 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4511 {
4512         bnxt_free_vnic_attributes(bp);
4513         bnxt_free_tx_rings(bp);
4514         bnxt_free_rx_rings(bp);
4515         bnxt_free_cp_rings(bp);
4516         bnxt_free_all_cp_arrays(bp);
4517         bnxt_free_ntp_fltrs(bp, irq_re_init);
4518         if (irq_re_init) {
4519                 bnxt_free_ring_stats(bp);
4520                 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4521                     test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4522                         bnxt_free_port_stats(bp);
4523                 bnxt_free_ring_grps(bp);
4524                 bnxt_free_vnics(bp);
4525                 kfree(bp->tx_ring_map);
4526                 bp->tx_ring_map = NULL;
4527                 kfree(bp->tx_ring);
4528                 bp->tx_ring = NULL;
4529                 kfree(bp->rx_ring);
4530                 bp->rx_ring = NULL;
4531                 kfree(bp->bnapi);
4532                 bp->bnapi = NULL;
4533         } else {
4534                 bnxt_clear_ring_indices(bp);
4535         }
4536 }
4537
4538 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4539 {
4540         int i, j, rc, size, arr_size;
4541         void *bnapi;
4542
4543         if (irq_re_init) {
4544                 /* Allocate bnapi mem pointer array and mem block for
4545                  * all queues
4546                  */
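                /* Resulting layout of the single kzalloc() block (sketch):
                 *
                 *   bp->bnapi --> [ ptr 0 ... ptr cp_nr_rings-1 | napi 0 | napi 1 | ... ]
                 *
                 * Each bp->bnapi[i] is pointed below at the i-th
                 * L1-cache-aligned struct bnxt_napi inside the same block.
                 */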
4547                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4548                                 bp->cp_nr_rings);
4549                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4550                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4551                 if (!bnapi)
4552                         return -ENOMEM;
4553
4554                 bp->bnapi = bnapi;
4555                 bnapi += arr_size;
4556                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4557                         bp->bnapi[i] = bnapi;
4558                         bp->bnapi[i]->index = i;
4559                         bp->bnapi[i]->bp = bp;
4560                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4561                                 struct bnxt_cp_ring_info *cpr =
4562                                         &bp->bnapi[i]->cp_ring;
4563
4564                                 cpr->cp_ring_struct.ring_mem.flags =
4565                                         BNXT_RMEM_RING_PTE_FLAG;
4566                         }
4567                 }
4568
4569                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4570                                       sizeof(struct bnxt_rx_ring_info),
4571                                       GFP_KERNEL);
4572                 if (!bp->rx_ring)
4573                         return -ENOMEM;
4574
4575                 for (i = 0; i < bp->rx_nr_rings; i++) {
4576                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4577
4578                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4579                                 rxr->rx_ring_struct.ring_mem.flags =
4580                                         BNXT_RMEM_RING_PTE_FLAG;
4581                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4582                                         BNXT_RMEM_RING_PTE_FLAG;
4583                         }
4584                         rxr->bnapi = bp->bnapi[i];
4585                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4586                 }
4587
4588                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4589                                       sizeof(struct bnxt_tx_ring_info),
4590                                       GFP_KERNEL);
4591                 if (!bp->tx_ring)
4592                         return -ENOMEM;
4593
4594                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4595                                           GFP_KERNEL);
4596
4597                 if (!bp->tx_ring_map)
4598                         return -ENOMEM;
4599
4600                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4601                         j = 0;
4602                 else
4603                         j = bp->rx_nr_rings;
4604
4605                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4606                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4607
4608                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4609                                 txr->tx_ring_struct.ring_mem.flags =
4610                                         BNXT_RMEM_RING_PTE_FLAG;
4611                         txr->bnapi = bp->bnapi[j];
4612                         bp->bnapi[j]->tx_ring = txr;
4613                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4614                         if (i >= bp->tx_nr_rings_xdp) {
4615                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4616                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4617                         } else {
4618                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4619                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4620                         }
4621                 }
4622
4623                 rc = bnxt_alloc_stats(bp);
4624                 if (rc)
4625                         goto alloc_mem_err;
4626                 bnxt_init_stats(bp);
4627
4628                 rc = bnxt_alloc_ntp_fltrs(bp);
4629                 if (rc)
4630                         goto alloc_mem_err;
4631
4632                 rc = bnxt_alloc_vnics(bp);
4633                 if (rc)
4634                         goto alloc_mem_err;
4635         }
4636
4637         rc = bnxt_alloc_all_cp_arrays(bp);
4638         if (rc)
4639                 goto alloc_mem_err;
4640
4641         bnxt_init_ring_struct(bp);
4642
4643         rc = bnxt_alloc_rx_rings(bp);
4644         if (rc)
4645                 goto alloc_mem_err;
4646
4647         rc = bnxt_alloc_tx_rings(bp);
4648         if (rc)
4649                 goto alloc_mem_err;
4650
4651         rc = bnxt_alloc_cp_rings(bp);
4652         if (rc)
4653                 goto alloc_mem_err;
4654
4655         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4656                                   BNXT_VNIC_UCAST_FLAG;
4657         rc = bnxt_alloc_vnic_attributes(bp);
4658         if (rc)
4659                 goto alloc_mem_err;
4660         return 0;
4661
4662 alloc_mem_err:
4663         bnxt_free_mem(bp, true);
4664         return rc;
4665 }
4666
4667 static void bnxt_disable_int(struct bnxt *bp)
4668 {
4669         int i;
4670
4671         if (!bp->bnapi)
4672                 return;
4673
4674         for (i = 0; i < bp->cp_nr_rings; i++) {
4675                 struct bnxt_napi *bnapi = bp->bnapi[i];
4676                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4677                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4678
4679                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4680                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4681         }
4682 }
4683
4684 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4685 {
4686         struct bnxt_napi *bnapi = bp->bnapi[n];
4687         struct bnxt_cp_ring_info *cpr;
4688
4689         cpr = &bnapi->cp_ring;
4690         return cpr->cp_ring_struct.map_idx;
4691 }
4692
4693 static void bnxt_disable_int_sync(struct bnxt *bp)
4694 {
4695         int i;
4696
4697         if (!bp->irq_tbl)
4698                 return;
4699
4700         atomic_inc(&bp->intr_sem);
4701
4702         bnxt_disable_int(bp);
4703         for (i = 0; i < bp->cp_nr_rings; i++) {
4704                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4705
4706                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4707         }
4708 }
4709
4710 static void bnxt_enable_int(struct bnxt *bp)
4711 {
4712         int i;
4713
4714         atomic_set(&bp->intr_sem, 0);
4715         for (i = 0; i < bp->cp_nr_rings; i++) {
4716                 struct bnxt_napi *bnapi = bp->bnapi[i];
4717                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4718
4719                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4720         }
4721 }
4722
4723 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4724                             bool async_only)
4725 {
4726         DECLARE_BITMAP(async_events_bmap, 256);
4727         u32 *events = (u32 *)async_events_bmap;
4728         struct hwrm_func_drv_rgtr_output *resp;
4729         struct hwrm_func_drv_rgtr_input *req;
4730         u32 flags;
4731         int rc, i;
4732
4733         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4734         if (rc)
4735                 return rc;
4736
4737         req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4738                                    FUNC_DRV_RGTR_REQ_ENABLES_VER |
4739                                    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4740
4741         req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4742         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4743         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4744                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4745         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4746                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4747                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4748         req->flags = cpu_to_le32(flags);
4749         req->ver_maj_8b = DRV_VER_MAJ;
4750         req->ver_min_8b = DRV_VER_MIN;
4751         req->ver_upd_8b = DRV_VER_UPD;
4752         req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4753         req->ver_min = cpu_to_le16(DRV_VER_MIN);
4754         req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4755
4756         if (BNXT_PF(bp)) {
4757                 u32 data[8];
4758                 int i;
4759
4760                 memset(data, 0, sizeof(data));
4761                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4762                         u16 cmd = bnxt_vf_req_snif[i];
4763                         unsigned int bit, idx;
4764
4765                         idx = cmd / 32;
4766                         bit = cmd % 32;
4767                         data[idx] |= 1 << bit;
4768                 }
4769
4770                 for (i = 0; i < 8; i++)
4771                         req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4772
4773                 req->enables |=
4774                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4775         }
4776
4777         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4778                 req->flags |= cpu_to_le32(
4779                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4780
4781         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4782         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4783                 u16 event_id = bnxt_async_events_arr[i];
4784
4785                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4786                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4787                         continue;
4788                 __set_bit(event_id, async_events_bmap);
4789         }
4790         if (bmap && bmap_size) {
4791                 for (i = 0; i < bmap_size; i++) {
4792                         if (test_bit(i, bmap))
4793                                 __set_bit(i, async_events_bmap);
4794                 }
4795         }
4796         for (i = 0; i < 8; i++)
4797                 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4798
4799         if (async_only)
4800                 req->enables =
4801                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4802
4803         resp = hwrm_req_hold(bp, req);
4804         rc = hwrm_req_send(bp, req);
4805         if (!rc) {
4806                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4807                 if (resp->flags &
4808                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4809                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4810         }
4811         hwrm_req_drop(bp, req);
4812         return rc;
4813 }
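
/* Worked example for the vf_req_fwd bitmap built above: an HWRM command ID
 * of 42 (0x2a, purely illustrative) maps to idx = 42 / 32 = 1 and
 * bit = 42 % 32 = 10, i.e. bit 10 of data[1].  The real command IDs come
 * from bnxt_vf_req_snif[].
 */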
4814
4815 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4816 {
4817         struct hwrm_func_drv_unrgtr_input *req;
4818         int rc;
4819
4820         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4821                 return 0;
4822
4823         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4824         if (rc)
4825                 return rc;
4826         return hwrm_req_send(bp, req);
4827 }
4828
4829 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4830 {
4831         struct hwrm_tunnel_dst_port_free_input *req;
4832         int rc;
4833
4834         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4835             bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4836                 return 0;
4837         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4838             bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4839                 return 0;
4840
4841         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4842         if (rc)
4843                 return rc;
4844
4845         req->tunnel_type = tunnel_type;
4846
4847         switch (tunnel_type) {
4848         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4849                 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4850                 bp->vxlan_port = 0;
4851                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4852                 break;
4853         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4854                 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4855                 bp->nge_port = 0;
4856                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4857                 break;
4858         default:
4859                 break;
4860         }
4861
4862         rc = hwrm_req_send(bp, req);
4863         if (rc)
4864                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4865                            rc);
4866         return rc;
4867 }
4868
4869 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4870                                            u8 tunnel_type)
4871 {
4872         struct hwrm_tunnel_dst_port_alloc_output *resp;
4873         struct hwrm_tunnel_dst_port_alloc_input *req;
4874         int rc;
4875
4876         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4877         if (rc)
4878                 return rc;
4879
4880         req->tunnel_type = tunnel_type;
4881         req->tunnel_dst_port_val = port;
4882
4883         resp = hwrm_req_hold(bp, req);
4884         rc = hwrm_req_send(bp, req);
4885         if (rc) {
4886                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4887                            rc);
4888                 goto err_out;
4889         }
4890
4891         switch (tunnel_type) {
4892         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4893                 bp->vxlan_port = port;
4894                 bp->vxlan_fw_dst_port_id =
4895                         le16_to_cpu(resp->tunnel_dst_port_id);
4896                 break;
4897         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4898                 bp->nge_port = port;
4899                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4900                 break;
4901         default:
4902                 break;
4903         }
4904
4905 err_out:
4906         hwrm_req_drop(bp, req);
4907         return rc;
4908 }
4909
4910 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4911 {
4912         struct hwrm_cfa_l2_set_rx_mask_input *req;
4913         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4914         int rc;
4915
4916         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4917         if (rc)
4918                 return rc;
4919
4920         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4921         if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4922                 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4923                 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4924         }
4925         req->mask = cpu_to_le32(vnic->rx_mask);
4926         return hwrm_req_send_silent(bp, req);
4927 }
4928
4929 #ifdef CONFIG_RFS_ACCEL
4930 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4931                                             struct bnxt_ntuple_filter *fltr)
4932 {
4933         struct hwrm_cfa_ntuple_filter_free_input *req;
4934         int rc;
4935
4936         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4937         if (rc)
4938                 return rc;
4939
4940         req->ntuple_filter_id = fltr->filter_id;
4941         return hwrm_req_send(bp, req);
4942 }
4943
4944 #define BNXT_NTP_FLTR_FLAGS                                     \
4945         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4946          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4947          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4948          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4949          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4950          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4951          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4952          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4953          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4954          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4955          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4956          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4957          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4958          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4959
4960 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4961                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4962
4963 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4964                                              struct bnxt_ntuple_filter *fltr)
4965 {
4966         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4967         struct hwrm_cfa_ntuple_filter_alloc_input *req;
4968         struct flow_keys *keys = &fltr->fkeys;
4969         struct bnxt_vnic_info *vnic;
4970         u32 flags = 0;
4971         int rc;
4972
4973         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4974         if (rc)
4975                 return rc;
4976
4977         req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4978
4979         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4980                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4981                 req->dst_id = cpu_to_le16(fltr->rxq);
4982         } else {
4983                 vnic = &bp->vnic_info[fltr->rxq + 1];
4984                 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4985         }
4986         req->flags = cpu_to_le32(flags);
4987         req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4988
4989         req->ethertype = htons(ETH_P_IP);
4990         memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4991         req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4992         req->ip_protocol = keys->basic.ip_proto;
4993
4994         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4995                 int i;
4996
4997                 req->ethertype = htons(ETH_P_IPV6);
4998                 req->ip_addr_type =
4999                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
5000                 *(struct in6_addr *)&req->src_ipaddr[0] =
5001                         keys->addrs.v6addrs.src;
5002                 *(struct in6_addr *)&req->dst_ipaddr[0] =
5003                         keys->addrs.v6addrs.dst;
5004                 for (i = 0; i < 4; i++) {
5005                         req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
5006                         req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
5007                 }
5008         } else {
5009                 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
5010                 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5011                 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5012                 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5013         }
5014         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
5015                 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5016                 req->tunnel_type =
5017                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5018         }
5019
5020         req->src_port = keys->ports.src;
5021         req->src_port_mask = cpu_to_be16(0xffff);
5022         req->dst_port = keys->ports.dst;
5023         req->dst_port_mask = cpu_to_be16(0xffff);
5024
5025         resp = hwrm_req_hold(bp, req);
5026         rc = hwrm_req_send(bp, req);
5027         if (!rc)
5028                 fltr->filter_id = resp->ntuple_filter_id;
5029         hwrm_req_drop(bp, req);
5030         return rc;
5031 }
5032 #endif
5033
5034 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5035                                      const u8 *mac_addr)
5036 {
5037         struct hwrm_cfa_l2_filter_alloc_output *resp;
5038         struct hwrm_cfa_l2_filter_alloc_input *req;
5039         int rc;
5040
5041         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5042         if (rc)
5043                 return rc;
5044
5045         req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5046         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5047                 req->flags |=
5048                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5049         req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5050         req->enables =
5051                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5052                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5053                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5054         memcpy(req->l2_addr, mac_addr, ETH_ALEN);
5055         req->l2_addr_mask[0] = 0xff;
5056         req->l2_addr_mask[1] = 0xff;
5057         req->l2_addr_mask[2] = 0xff;
5058         req->l2_addr_mask[3] = 0xff;
5059         req->l2_addr_mask[4] = 0xff;
5060         req->l2_addr_mask[5] = 0xff;
5061
5062         resp = hwrm_req_hold(bp, req);
5063         rc = hwrm_req_send(bp, req);
5064         if (!rc)
5065                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5066                                                         resp->l2_filter_id;
5067         hwrm_req_drop(bp, req);
5068         return rc;
5069 }
5070
5071 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5072 {
5073         struct hwrm_cfa_l2_filter_free_input *req;
5074         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5075         int rc;
5076
5077         /* Any associated ntuple filters will also be cleared by firmware. */
5078         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5079         if (rc)
5080                 return rc;
5081         hwrm_req_hold(bp, req);
5082         for (i = 0; i < num_of_vnics; i++) {
5083                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5084
5085                 for (j = 0; j < vnic->uc_filter_count; j++) {
5086                         req->l2_filter_id = vnic->fw_l2_filter_id[j];
5087
5088                         rc = hwrm_req_send(bp, req);
5089                 }
5090                 vnic->uc_filter_count = 0;
5091         }
5092         hwrm_req_drop(bp, req);
5093         return rc;
5094 }
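
/* Note the hold/send/drop pattern above: hwrm_req_hold() keeps the request
 * buffer valid so it can be re-sent once per L2 filter inside the loop, and
 * hwrm_req_drop() releases it after the last send.
 */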
5095
5096 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5097 {
5098         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5099         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5100         struct hwrm_vnic_tpa_cfg_input *req;
5101         int rc;
5102
5103         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5104                 return 0;
5105
5106         rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5107         if (rc)
5108                 return rc;
5109
5110         if (tpa_flags) {
5111                 u16 mss = bp->dev->mtu - 40;
5112                 u32 nsegs, n, segs = 0, flags;
5113
5114                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5115                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5116                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5117                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5118                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5119                 if (tpa_flags & BNXT_FLAG_GRO)
5120                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5121
5122                 req->flags = cpu_to_le32(flags);
5123
5124                 req->enables =
5125                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5126                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5127                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5128
5129                 /* The number of segs is in log2 units, and the first
5130                  * packet is not counted as part of these units.
5131                  */
5132                 if (mss <= BNXT_RX_PAGE_SIZE) {
5133                         n = BNXT_RX_PAGE_SIZE / mss;
5134                         nsegs = (MAX_SKB_FRAGS - 1) * n;
5135                 } else {
5136                         n = mss / BNXT_RX_PAGE_SIZE;
5137                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
5138                                 n++;
5139                         nsegs = (MAX_SKB_FRAGS - n) / n;
5140                 }
5141
5142                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5143                         segs = MAX_TPA_SEGS_P5;
5144                         max_aggs = bp->max_tpa;
5145                 } else {
5146                         segs = ilog2(nsegs);
5147                 }
5148                 req->max_agg_segs = cpu_to_le16(segs);
5149                 req->max_aggs = cpu_to_le16(max_aggs);
5150
5151                 req->min_agg_len = cpu_to_le32(512);
5152         }
5153         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5154
5155         return hwrm_req_send(bp, req);
5156 }
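
/* Worked example for the log2 computation above, assuming a 4K
 * BNXT_RX_PAGE_SIZE and MAX_SKB_FRAGS == 17: an MTU of 1500 gives
 * mss = 1460, so n = 4096 / 1460 = 2 and nsegs = (17 - 1) * 2 = 32, hence
 * max_agg_segs = ilog2(32) = 5, i.e. up to 2^5 = 32 aggregated segments
 * after the first packet.
 */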
5157
5158 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5159 {
5160         struct bnxt_ring_grp_info *grp_info;
5161
5162         grp_info = &bp->grp_info[ring->grp_idx];
5163         return grp_info->cp_fw_ring_id;
5164 }
5165
5166 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5167 {
5168         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5169                 struct bnxt_napi *bnapi = rxr->bnapi;
5170                 struct bnxt_cp_ring_info *cpr;
5171
5172                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5173                 return cpr->cp_ring_struct.fw_ring_id;
5174         } else {
5175                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5176         }
5177 }
5178
5179 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5180 {
5181         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5182                 struct bnxt_napi *bnapi = txr->bnapi;
5183                 struct bnxt_cp_ring_info *cpr;
5184
5185                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5186                 return cpr->cp_ring_struct.fw_ring_id;
5187         } else {
5188                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5189         }
5190 }
5191
5192 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5193 {
5194         int entries;
5195
5196         if (bp->flags & BNXT_FLAG_CHIP_P5)
5197                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5198         else
5199                 entries = HW_HASH_INDEX_SIZE;
5200
5201         bp->rss_indir_tbl_entries = entries;
5202         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5203                                           GFP_KERNEL);
5204         if (!bp->rss_indir_tbl)
5205                 return -ENOMEM;
5206         return 0;
5207 }
5208
5209 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5210 {
5211         u16 max_rings, max_entries, pad, i;
5212
5213         if (!bp->rx_nr_rings)
5214                 return;
5215
5216         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5217                 max_rings = bp->rx_nr_rings - 1;
5218         else
5219                 max_rings = bp->rx_nr_rings;
5220
5221         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5222
5223         for (i = 0; i < max_entries; i++)
5224                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5225
5226         pad = bp->rss_indir_tbl_entries - max_entries;
5227         if (pad)
5228                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5229 }
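
/* Example of the default table: ethtool_rxfh_indir_default(i, n) returns
 * i % n, so with max_rings == 4 the table reads 0, 1, 2, 3, 0, 1, 2, 3, ...
 * across all max_entries slots, spreading flows evenly over the RX rings.
 */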
5230
5231 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5232 {
5233         u16 i, tbl_size, max_ring = 0;
5234
5235         if (!bp->rss_indir_tbl)
5236                 return 0;
5237
5238         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5239         for (i = 0; i < tbl_size; i++)
5240                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5241         return max_ring;
5242 }
5243
5244 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5245 {
5246         if (bp->flags & BNXT_FLAG_CHIP_P5)
5247                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5248         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5249                 return 2;
5250         return 1;
5251 }
5252
5253 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5254 {
5255         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5256         u16 i, j;
5257
5258         /* Fill the RSS indirection table with ring group ids */
5259         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5260                 if (!no_rss)
5261                         j = bp->rss_indir_tbl[i];
5262                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5263         }
5264 }
5265
5266 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5267                                       struct bnxt_vnic_info *vnic)
5268 {
5269         __le16 *ring_tbl = vnic->rss_table;
5270         struct bnxt_rx_ring_info *rxr;
5271         u16 tbl_size, i;
5272
5273         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5274
5275         for (i = 0; i < tbl_size; i++) {
5276                 u16 ring_id, j;
5277
5278                 j = bp->rss_indir_tbl[i];
5279                 rxr = &bp->rx_ring[j];
5280
5281                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5282                 *ring_tbl++ = cpu_to_le16(ring_id);
5283                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5284                 *ring_tbl++ = cpu_to_le16(ring_id);
5285         }
5286 }
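
/* On P5 chips each indirection table slot is a pair of 16-bit IDs: the RX
 * ring's FW ring ID followed by its companion completion ring's FW ring ID,
 * which is why ring_tbl above advances by two entries per table slot.
 */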
5287
5288 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5289 {
5290         if (bp->flags & BNXT_FLAG_CHIP_P5)
5291                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5292         else
5293                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5294 }
5295
5296 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5297 {
5298         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5299         struct hwrm_vnic_rss_cfg_input *req;
5300         int rc;
5301
5302         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5303             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5304                 return 0;
5305
5306         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5307         if (rc)
5308                 return rc;
5309
5310         if (set_rss) {
5311                 bnxt_fill_hw_rss_tbl(bp, vnic);
5312                 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5313                 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5314                 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5315                 req->hash_key_tbl_addr =
5316                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5317         }
5318         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5319         return hwrm_req_send(bp, req);
5320 }
5321
5322 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5323 {
5324         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5325         struct hwrm_vnic_rss_cfg_input *req;
5326         dma_addr_t ring_tbl_map;
5327         u32 i, nr_ctxs;
5328         int rc;
5329
5330         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5331         if (rc)
5332                 return rc;
5333
5334         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5335         if (!set_rss)
5336                 return hwrm_req_send(bp, req);
5337
5338         bnxt_fill_hw_rss_tbl(bp, vnic);
5339         req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5340         req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5341         req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5342         ring_tbl_map = vnic->rss_table_dma_addr;
5343         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5344
5345         hwrm_req_hold(bp, req);
5346         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5347                 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5348                 req->ring_table_pair_index = i;
5349                 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5350                 rc = hwrm_req_send(bp, req);
5351                 if (rc)
5352                         goto exit;
5353         }
5354
5355 exit:
5356         hwrm_req_drop(bp, req);
5357         return rc;
5358 }
5359
5360 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5361 {
5362         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5363         struct hwrm_vnic_plcmodes_cfg_input *req;
5364         int rc;
5365
5366         rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5367         if (rc)
5368                 return rc;
5369
5370         req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
5371         req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
5372
5373         if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
5374                 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5375                                           VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5376                 req->enables |=
5377                         cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5378         }
5379         /* thresholds not implemented in firmware yet */
5380         req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5381         req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5382         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5383         return hwrm_req_send(bp, req);
5384 }
5385
5386 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5387                                         u16 ctx_idx)
5388 {
5389         struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5390
5391         if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5392                 return;
5393
5394         req->rss_cos_lb_ctx_id =
5395                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5396
5397         hwrm_req_send(bp, req);
5398         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5399 }
5400
5401 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5402 {
5403         int i, j;
5404
5405         for (i = 0; i < bp->nr_vnics; i++) {
5406                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5407
5408                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5409                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5410                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5411                 }
5412         }
5413         bp->rsscos_nr_ctxs = 0;
5414 }
5415
5416 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5417 {
5418         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5419         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5420         int rc;
5421
5422         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5423         if (rc)
5424                 return rc;
5425
5426         resp = hwrm_req_hold(bp, req);
5427         rc = hwrm_req_send(bp, req);
5428         if (!rc)
5429                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5430                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5431         hwrm_req_drop(bp, req);
5432
5433         return rc;
5434 }
5435
5436 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5437 {
5438         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5439                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5440         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5441 }
5442
5443 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5444 {
5445         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5446         struct hwrm_vnic_cfg_input *req;
5447         unsigned int ring = 0, grp_idx;
5448         u16 def_vlan = 0;
5449         int rc;
5450
5451         rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5452         if (rc)
5453                 return rc;
5454
5455         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5456                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5457
5458                 req->default_rx_ring_id =
5459                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5460                 req->default_cmpl_ring_id =
5461                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5462                 req->enables =
5463                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5464                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5465                 goto vnic_mru;
5466         }
5467         req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5468         /* Only RSS is supported for now.  TBD: COS & LB */
5469         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5470                 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5471                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5472                                            VNIC_CFG_REQ_ENABLES_MRU);
5473         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5474                 req->rss_rule =
5475                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5476                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5477                                            VNIC_CFG_REQ_ENABLES_MRU);
5478                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5479         } else {
5480                 req->rss_rule = cpu_to_le16(0xffff);
5481         }
5482
5483         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5484             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5485                 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5486                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5487         } else {
5488                 req->cos_rule = cpu_to_le16(0xffff);
5489         }
5490
5491         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5492                 ring = 0;
5493         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5494                 ring = vnic_id - 1;
5495         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5496                 ring = bp->rx_nr_rings - 1;
5497
5498         grp_idx = bp->rx_ring[ring].bnapi->index;
5499         req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5500         req->lb_rule = cpu_to_le16(0xffff);
5501 vnic_mru:
5502         req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5503
5504         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5505 #ifdef CONFIG_BNXT_SRIOV
5506         if (BNXT_VF(bp))
5507                 def_vlan = bp->vf.vlan;
5508 #endif
5509         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5510                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5511         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5512                 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5513
5514         return hwrm_req_send(bp, req);
5515 }
5516
5517 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5518 {
5519         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5520                 struct hwrm_vnic_free_input *req;
5521
5522                 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5523                         return;
5524
5525                 req->vnic_id =
5526                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5527
5528                 hwrm_req_send(bp, req);
5529                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5530         }
5531 }
5532
5533 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5534 {
5535         u16 i;
5536
5537         for (i = 0; i < bp->nr_vnics; i++)
5538                 bnxt_hwrm_vnic_free_one(bp, i);
5539 }
5540
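/* Allocate a VNIC in firmware.  On pre-P5 chips, the RX rings in
 * [start_rx_ring_idx, start_rx_ring_idx + nr_rings) are first mapped
 * to the VNIC through their ring group IDs; P5 chips have no ring
 * groups.  All RSS/COS/LB contexts start out invalid, and VNIC 0 is
 * flagged as the default VNIC.
 */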
5541 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5542                                 unsigned int start_rx_ring_idx,
5543                                 unsigned int nr_rings)
5544 {
5545         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5546         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5547         struct hwrm_vnic_alloc_output *resp;
5548         struct hwrm_vnic_alloc_input *req;
5549         int rc;
5550
5551         rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5552         if (rc)
5553                 return rc;
5554
5555         if (bp->flags & BNXT_FLAG_CHIP_P5)
5556                 goto vnic_no_ring_grps;
5557
5558         /* map ring groups to this vnic */
5559         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5560                 grp_idx = bp->rx_ring[i].bnapi->index;
5561                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5562                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5563                                    j, nr_rings);
5564                         break;
5565                 }
5566                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5567         }
5568
5569 vnic_no_ring_grps:
5570         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5571                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5572         if (vnic_id == 0)
5573                 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5574
5575         resp = hwrm_req_hold(bp, req);
5576         rc = hwrm_req_send(bp, req);
5577         if (!rc)
5578                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5579         hwrm_req_drop(bp, req);
5580         return rc;
5581 }
5582
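/* Query VNIC capabilities (HWRM_VNIC_QCAPS) and cache the results:
 * the new RSS capability, RoCE mirroring, VLAN strip support (with a
 * workaround for older P5 firmware), the TPA v2 aggregation limit and
 * the matching per-ring hardware stats size.
 */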
5583 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5584 {
5585         struct hwrm_vnic_qcaps_output *resp;
5586         struct hwrm_vnic_qcaps_input *req;
5587         int rc;
5588
5589         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5590         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5591         if (bp->hwrm_spec_code < 0x10600)
5592                 return 0;
5593
5594         rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5595         if (rc)
5596                 return rc;
5597
5598         resp = hwrm_req_hold(bp, req);
5599         rc = hwrm_req_send(bp, req);
5600         if (!rc) {
5601                 u32 flags = le32_to_cpu(resp->flags);
5602
5603                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5604                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5605                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5606                 if (flags &
5607                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5608                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5609
5610                 /* Older P5 firmware (before EXT_HW_STATS support) did
5611                  * not set VLAN_STRIP_CAP properly.
5612                  */
5613                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5614                     (BNXT_CHIP_P5_THOR(bp) &&
5615                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5616                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5617                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5618                 if (bp->max_tpa_v2) {
5619                         if (BNXT_CHIP_P5_THOR(bp))
5620                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5621                         else
5622                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5623                 }
5624         }
5625         hwrm_req_drop(bp, req);
5626         return rc;
5627 }
5628
5629 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5630 {
5631         struct hwrm_ring_grp_alloc_output *resp;
5632         struct hwrm_ring_grp_alloc_input *req;
5633         int rc;
5634         u16 i;
5635
5636         if (bp->flags & BNXT_FLAG_CHIP_P5)
5637                 return 0;
5638
5639         rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5640         if (rc)
5641                 return rc;
5642
5643         resp = hwrm_req_hold(bp, req);
5644         for (i = 0; i < bp->rx_nr_rings; i++) {
5645                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5646
5647                 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5648                 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5649                 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5650                 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5651
5652                 rc = hwrm_req_send(bp, req);
5653
5654                 if (rc)
5655                         break;
5656
5657                 bp->grp_info[grp_idx].fw_grp_id =
5658                         le32_to_cpu(resp->ring_group_id);
5659         }
5660         hwrm_req_drop(bp, req);
5661         return rc;
5662 }
5663
5664 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5665 {
5666         struct hwrm_ring_grp_free_input *req;
5667         u16 i;
5668
5669         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5670                 return;
5671
5672         if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5673                 return;
5674
5675         hwrm_req_hold(bp, req);
5676         for (i = 0; i < bp->cp_nr_rings; i++) {
5677                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5678                         continue;
5679                 req->ring_group_id =
5680                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5681
5682                 hwrm_req_send(bp, req);
5683                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5684         }
5685         hwrm_req_drop(bp, req);
5686 }
5687
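/* Send one HWRM_RING_ALLOC request for a TX, RX, AGG, CMPL or NQ ring.
 * Rings spanning multiple pages are described by a page table (page
 * size in log2 units); single-page rings pass the DMA address of the
 * page directly.  On success the firmware ring ID is stored in
 * ring->fw_ring_id.
 */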
5688 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5689                                     struct bnxt_ring_struct *ring,
5690                                     u32 ring_type, u32 map_index)
5691 {
5692         struct hwrm_ring_alloc_output *resp;
5693         struct hwrm_ring_alloc_input *req;
5694         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5695         struct bnxt_ring_grp_info *grp_info;
5696         int rc, err = 0;
5697         u16 ring_id;
5698
5699         rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5700         if (rc)
5701                 goto exit;
5702
5703         req->enables = 0;
5704         if (rmem->nr_pages > 1) {
5705                 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5706                 /* Page size is in log2 units */
5707                 req->page_size = BNXT_PAGE_SHIFT;
5708                 req->page_tbl_depth = 1;
5709         } else {
5710                 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5711         }
5712         req->fbo = 0;
5713         /* Association of ring index with doorbell index and MSIX number */
5714         req->logical_id = cpu_to_le16(map_index);
5715
5716         switch (ring_type) {
5717         case HWRM_RING_ALLOC_TX: {
5718                 struct bnxt_tx_ring_info *txr;
5719
5720                 txr = container_of(ring, struct bnxt_tx_ring_info,
5721                                    tx_ring_struct);
5722                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5723                 /* Association of transmit ring with completion ring */
5724                 grp_info = &bp->grp_info[ring->grp_idx];
5725                 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5726                 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5727                 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5728                 req->queue_id = cpu_to_le16(ring->queue_id);
5729                 break;
5730         }
5731         case HWRM_RING_ALLOC_RX:
5732                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5733                 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5734                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5735                         u16 flags = 0;
5736
5737                         /* Association of rx ring with stats context */
5738                         grp_info = &bp->grp_info[ring->grp_idx];
5739                         req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5740                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5741                         req->enables |= cpu_to_le32(
5742                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5743                         if (NET_IP_ALIGN == 2)
5744                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5745                         req->flags = cpu_to_le16(flags);
5746                 }
5747                 break;
5748         case HWRM_RING_ALLOC_AGG:
5749                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5750                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5751                         /* Association of agg ring with rx ring */
5752                         grp_info = &bp->grp_info[ring->grp_idx];
5753                         req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5754                         req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5755                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5756                         req->enables |= cpu_to_le32(
5757                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5758                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5759                 } else {
5760                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5761                 }
5762                 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5763                 break;
5764         case HWRM_RING_ALLOC_CMPL:
5765                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5766                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5767                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5768                         /* Association of cp ring with nq */
5769                         grp_info = &bp->grp_info[map_index];
5770                         req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5771                         req->cq_handle = cpu_to_le64(ring->handle);
5772                         req->enables |= cpu_to_le32(
5773                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5774                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5775                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5776                 }
5777                 break;
5778         case HWRM_RING_ALLOC_NQ:
5779                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5780                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5781                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5782                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5783                 break;
5784         default:
5785                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5786                            ring_type);
5787                 return -1;
5788         }
5789
5790         resp = hwrm_req_hold(bp, req);
5791         rc = hwrm_req_send(bp, req);
5792         err = le16_to_cpu(resp->error_code);
5793         ring_id = le16_to_cpu(resp->ring_id);
5794         hwrm_req_drop(bp, req);
5795
5796 exit:
5797         if (rc || err) {
5798                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5799                            ring_type, rc, err);
5800                 return -EIO;
5801         }
5802         ring->fw_ring_id = ring_id;
5803         return rc;
5804 }
5805
5806 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5807 {
5808         int rc;
5809
5810         if (BNXT_PF(bp)) {
5811                 struct hwrm_func_cfg_input *req;
5812
5813                 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5814                 if (rc)
5815                         return rc;
5816
5817                 req->fid = cpu_to_le16(0xffff);
5818                 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5819                 req->async_event_cr = cpu_to_le16(idx);
5820                 return hwrm_req_send(bp, req);
5821         } else {
5822                 struct hwrm_func_vf_cfg_input *req;
5823
5824                 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5825                 if (rc)
5826                         return rc;
5827
5828                 req->enables =
5829                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5830                 req->async_event_cr = cpu_to_le16(idx);
5831                 return hwrm_req_send(bp, req);
5832         }
5833 }
5834
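/* Set up the doorbell address and key for a newly allocated ring.  P5
 * chips use a fixed PF or VF doorbell offset and encode the ring type
 * plus the firmware ring ID (xid) into a 64-bit key; legacy chips use
 * one 0x80-byte doorbell region per map index, so e.g. map_idx 3 lands
 * at bar1 + 0x180.
 */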
5835 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5836                         u32 map_idx, u32 xid)
5837 {
5838         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5839                 if (BNXT_PF(bp))
5840                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5841                 else
5842                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5843                 switch (ring_type) {
5844                 case HWRM_RING_ALLOC_TX:
5845                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5846                         break;
5847                 case HWRM_RING_ALLOC_RX:
5848                 case HWRM_RING_ALLOC_AGG:
5849                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5850                         break;
5851                 case HWRM_RING_ALLOC_CMPL:
5852                         db->db_key64 = DBR_PATH_L2;
5853                         break;
5854                 case HWRM_RING_ALLOC_NQ:
5855                         db->db_key64 = DBR_PATH_L2;
5856                         break;
5857                 }
5858                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5859         } else {
5860                 db->doorbell = bp->bar1 + map_idx * 0x80;
5861                 switch (ring_type) {
5862                 case HWRM_RING_ALLOC_TX:
5863                         db->db_key32 = DB_KEY_TX;
5864                         break;
5865                 case HWRM_RING_ALLOC_RX:
5866                 case HWRM_RING_ALLOC_AGG:
5867                         db->db_key32 = DB_KEY_RX;
5868                         break;
5869                 case HWRM_RING_ALLOC_CMPL:
5870                         db->db_key32 = DB_KEY_CP;
5871                         break;
5872                 }
5873         }
5874 }
5875
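/* Allocate all rings in firmware in dependency order: NQs (P5) or
 * completion rings first, then TX, RX and aggregation rings.  Each
 * NQ/CMPL ring's IRQ stays disabled until its doorbell has been set up
 * and armed, and the first ring is also registered as the async event
 * completion ring.
 */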
5876 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5877 {
5878         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5879         int i, rc = 0;
5880         u32 type;
5881
5882         if (bp->flags & BNXT_FLAG_CHIP_P5)
5883                 type = HWRM_RING_ALLOC_NQ;
5884         else
5885                 type = HWRM_RING_ALLOC_CMPL;
5886         for (i = 0; i < bp->cp_nr_rings; i++) {
5887                 struct bnxt_napi *bnapi = bp->bnapi[i];
5888                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5889                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5890                 u32 map_idx = ring->map_idx;
5891                 unsigned int vector;
5892
5893                 vector = bp->irq_tbl[map_idx].vector;
5894                 disable_irq_nosync(vector);
5895                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5896                 if (rc) {
5897                         enable_irq(vector);
5898                         goto err_out;
5899                 }
5900                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5901                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5902                 enable_irq(vector);
5903                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5904
5905                 if (!i) {
5906                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5907                         if (rc)
5908                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5909                 }
5910         }
5911
5912         type = HWRM_RING_ALLOC_TX;
5913         for (i = 0; i < bp->tx_nr_rings; i++) {
5914                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5915                 struct bnxt_ring_struct *ring;
5916                 u32 map_idx;
5917
5918                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5919                         struct bnxt_napi *bnapi = txr->bnapi;
5920                         struct bnxt_cp_ring_info *cpr, *cpr2;
5921                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5922
5923                         cpr = &bnapi->cp_ring;
5924                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5925                         ring = &cpr2->cp_ring_struct;
5926                         ring->handle = BNXT_TX_HDL;
5927                         map_idx = bnapi->index;
5928                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5929                         if (rc)
5930                                 goto err_out;
5931                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5932                                     ring->fw_ring_id);
5933                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5934                 }
5935                 ring = &txr->tx_ring_struct;
5936                 map_idx = i;
5937                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5938                 if (rc)
5939                         goto err_out;
5940                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5941         }
5942
5943         type = HWRM_RING_ALLOC_RX;
5944         for (i = 0; i < bp->rx_nr_rings; i++) {
5945                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5946                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5947                 struct bnxt_napi *bnapi = rxr->bnapi;
5948                 u32 map_idx = bnapi->index;
5949
5950                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5951                 if (rc)
5952                         goto err_out;
5953                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5954                 /* If we have agg rings, post agg buffers first. */
5955                 if (!agg_rings)
5956                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5957                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5958                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5959                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5960                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5961                         struct bnxt_cp_ring_info *cpr2;
5962
5963                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5964                         ring = &cpr2->cp_ring_struct;
5965                         ring->handle = BNXT_RX_HDL;
5966                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5967                         if (rc)
5968                                 goto err_out;
5969                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5970                                     ring->fw_ring_id);
5971                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5972                 }
5973         }
5974
5975         if (agg_rings) {
5976                 type = HWRM_RING_ALLOC_AGG;
5977                 for (i = 0; i < bp->rx_nr_rings; i++) {
5978                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5979                         struct bnxt_ring_struct *ring =
5980                                                 &rxr->rx_agg_ring_struct;
5981                         u32 grp_idx = ring->grp_idx;
5982                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5983
5984                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5985                         if (rc)
5986                                 goto err_out;
5987
5988                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5989                                     ring->fw_ring_id);
5990                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5991                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5992                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5993                 }
5994         }
5995 err_out:
5996         return rc;
5997 }
5998
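/* Free one firmware ring.  On the close path a valid cmpl_ring_id is
 * passed so that, presumably, the response completes on that ring
 * behind any in-flight traffic; otherwise INVALID_HW_RING_ID is used.
 */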
5999 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6000                                    struct bnxt_ring_struct *ring,
6001                                    u32 ring_type, int cmpl_ring_id)
6002 {
6003         struct hwrm_ring_free_output *resp;
6004         struct hwrm_ring_free_input *req;
6005         u16 error_code = 0;
6006         int rc;
6007
6008         if (BNXT_NO_FW_ACCESS(bp))
6009                 return 0;
6010
6011         rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6012         if (rc)
6013                 goto exit;
6014
6015         req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6016         req->ring_type = ring_type;
6017         req->ring_id = cpu_to_le16(ring->fw_ring_id);
6018
6019         resp = hwrm_req_hold(bp, req);
6020         rc = hwrm_req_send(bp, req);
6021         error_code = le16_to_cpu(resp->error_code);
6022         hwrm_req_drop(bp, req);
6023 exit:
6024         if (rc || error_code) {
6025                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6026                            ring_type, rc, error_code);
6027                 return -EIO;
6028         }
6029         return 0;
6030 }
6031
6032 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6033 {
6034         u32 type;
6035         int i;
6036
6037         if (!bp->bnapi)
6038                 return;
6039
6040         for (i = 0; i < bp->tx_nr_rings; i++) {
6041                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6042                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
6043
6044                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6045                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6046
6047                         hwrm_ring_free_send_msg(bp, ring,
6048                                                 RING_FREE_REQ_RING_TYPE_TX,
6049                                                 close_path ? cmpl_ring_id :
6050                                                 INVALID_HW_RING_ID);
6051                         ring->fw_ring_id = INVALID_HW_RING_ID;
6052                 }
6053         }
6054
6055         for (i = 0; i < bp->rx_nr_rings; i++) {
6056                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6057                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6058                 u32 grp_idx = rxr->bnapi->index;
6059
6060                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6061                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6062
6063                         hwrm_ring_free_send_msg(bp, ring,
6064                                                 RING_FREE_REQ_RING_TYPE_RX,
6065                                                 close_path ? cmpl_ring_id :
6066                                                 INVALID_HW_RING_ID);
6067                         ring->fw_ring_id = INVALID_HW_RING_ID;
6068                         bp->grp_info[grp_idx].rx_fw_ring_id =
6069                                 INVALID_HW_RING_ID;
6070                 }
6071         }
6072
6073         if (bp->flags & BNXT_FLAG_CHIP_P5)
6074                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6075         else
6076                 type = RING_FREE_REQ_RING_TYPE_RX;
6077         for (i = 0; i < bp->rx_nr_rings; i++) {
6078                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6079                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6080                 u32 grp_idx = rxr->bnapi->index;
6081
6082                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6083                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6084
6085                         hwrm_ring_free_send_msg(bp, ring, type,
6086                                                 close_path ? cmpl_ring_id :
6087                                                 INVALID_HW_RING_ID);
6088                         ring->fw_ring_id = INVALID_HW_RING_ID;
6089                         bp->grp_info[grp_idx].agg_fw_ring_id =
6090                                 INVALID_HW_RING_ID;
6091                 }
6092         }
6093
6094         /* The completion rings are about to be freed.  After that, the
6095          * IRQ doorbell will no longer work, so the IRQs must be
6096          * disabled here.
6097          */
6098         bnxt_disable_int_sync(bp);
6099
6100         if (bp->flags & BNXT_FLAG_CHIP_P5)
6101                 type = RING_FREE_REQ_RING_TYPE_NQ;
6102         else
6103                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6104         for (i = 0; i < bp->cp_nr_rings; i++) {
6105                 struct bnxt_napi *bnapi = bp->bnapi[i];
6106                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6107                 struct bnxt_ring_struct *ring;
6108                 int j;
6109
6110                 for (j = 0; j < 2; j++) {
6111                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6112
6113                         if (cpr2) {
6114                                 ring = &cpr2->cp_ring_struct;
6115                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6116                                         continue;
6117                                 hwrm_ring_free_send_msg(bp, ring,
6118                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
6119                                         INVALID_HW_RING_ID);
6120                                 ring->fw_ring_id = INVALID_HW_RING_ID;
6121                         }
6122                 }
6123                 ring = &cpr->cp_ring_struct;
6124                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6125                         hwrm_ring_free_send_msg(bp, ring, type,
6126                                                 INVALID_HW_RING_ID);
6127                         ring->fw_ring_id = INVALID_HW_RING_ID;
6128                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6129                 }
6130         }
6131 }
6132
6133 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6134                            bool shared);
6135
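/* Read back the resources the firmware has actually reserved for this
 * function (HWRM_FUNC_QCFG).  Under the new resource manager on P5
 * chips, RX and TX rings are re-trimmed if fewer completion rings were
 * granted than the rings would need.
 */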
6136 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6137 {
6138         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6139         struct hwrm_func_qcfg_output *resp;
6140         struct hwrm_func_qcfg_input *req;
6141         int rc;
6142
6143         if (bp->hwrm_spec_code < 0x10601)
6144                 return 0;
6145
6146         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6147         if (rc)
6148                 return rc;
6149
6150         req->fid = cpu_to_le16(0xffff);
6151         resp = hwrm_req_hold(bp, req);
6152         rc = hwrm_req_send(bp, req);
6153         if (rc) {
6154                 hwrm_req_drop(bp, req);
6155                 return rc;
6156         }
6157
6158         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6159         if (BNXT_NEW_RM(bp)) {
6160                 u16 cp, stats;
6161
6162                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6163                 hw_resc->resv_hw_ring_grps =
6164                         le32_to_cpu(resp->alloc_hw_ring_grps);
6165                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6166                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6167                 stats = le16_to_cpu(resp->alloc_stat_ctx);
6168                 hw_resc->resv_irqs = cp;
6169                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6170                         int rx = hw_resc->resv_rx_rings;
6171                         int tx = hw_resc->resv_tx_rings;
6172
6173                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6174                                 rx >>= 1;
6175                         if (cp < (rx + tx)) {
6176                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6177                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6178                                         rx <<= 1;
6179                                 hw_resc->resv_rx_rings = rx;
6180                                 hw_resc->resv_tx_rings = tx;
6181                         }
6182                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6183                         hw_resc->resv_hw_ring_grps = rx;
6184                 }
6185                 hw_resc->resv_cp_rings = cp;
6186                 hw_resc->resv_stat_ctxs = stats;
6187         }
6188         hwrm_req_drop(bp, req);
6189         return 0;
6190 }
6191
6192 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6193 {
6194         struct hwrm_func_qcfg_output *resp;
6195         struct hwrm_func_qcfg_input *req;
6196         int rc;
6197
6198         if (bp->hwrm_spec_code < 0x10601)
6199                 return 0;
6200
6201         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6202         if (rc)
6203                 return rc;
6204
6205         req->fid = cpu_to_le16(fid);
6206         resp = hwrm_req_hold(bp, req);
6207         rc = hwrm_req_send(bp, req);
6208         if (!rc)
6209                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6210
6211         hwrm_req_drop(bp, req);
6212         return rc;
6213 }
6214
6215 static bool bnxt_rfs_supported(struct bnxt *bp);
6216
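/* Build (but do not send) a HWRM_FUNC_CFG request that reserves rings
 * for the PF.  Firmware only acts on a field whose bit is set in
 * 'enables', so zero-valued requests are simply omitted, e.g.:
 *
 *	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
 *
 * The caller is responsible for sending or dropping the request.
 */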
6217 static struct hwrm_func_cfg_input *
6218 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6219                              int ring_grps, int cp_rings, int stats, int vnics)
6220 {
6221         struct hwrm_func_cfg_input *req;
6222         u32 enables = 0;
6223
6224         if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6225                 return NULL;
6226
6227         req->fid = cpu_to_le16(0xffff);
6228         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6229         req->num_tx_rings = cpu_to_le16(tx_rings);
6230         if (BNXT_NEW_RM(bp)) {
6231                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6232                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6233                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6234                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6235                         enables |= tx_rings + ring_grps ?
6236                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6237                         enables |= rx_rings ?
6238                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6239                 } else {
6240                         enables |= cp_rings ?
6241                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6242                         enables |= ring_grps ?
6243                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6244                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6245                 }
6246                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6247
6248                 req->num_rx_rings = cpu_to_le16(rx_rings);
6249                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6250                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6251                         req->num_msix = cpu_to_le16(cp_rings);
6252                         req->num_rsscos_ctxs =
6253                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6254                 } else {
6255                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
6256                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6257                         req->num_rsscos_ctxs = cpu_to_le16(1);
6258                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6259                             bnxt_rfs_supported(bp))
6260                                 req->num_rsscos_ctxs =
6261                                         cpu_to_le16(ring_grps + 1);
6262                 }
6263                 req->num_stat_ctxs = cpu_to_le16(stats);
6264                 req->num_vnics = cpu_to_le16(vnics);
6265         }
6266         req->enables = cpu_to_le32(enables);
6267         return req;
6268 }
6269
6270 static struct hwrm_func_vf_cfg_input *
6271 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6272                              int ring_grps, int cp_rings, int stats, int vnics)
6273 {
6274         struct hwrm_func_vf_cfg_input *req;
6275         u32 enables = 0;
6276
6277         if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6278                 return NULL;
6279
6280         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6281         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6282                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6283         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6284         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6285                 enables |= tx_rings + ring_grps ?
6286                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6287         } else {
6288                 enables |= cp_rings ?
6289                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6290                 enables |= ring_grps ?
6291                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6292         }
6293         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6294         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6295
6296         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6297         req->num_tx_rings = cpu_to_le16(tx_rings);
6298         req->num_rx_rings = cpu_to_le16(rx_rings);
6299         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6300                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6301                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6302         } else {
6303                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6304                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6305                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6306         }
6307         req->num_stat_ctxs = cpu_to_le16(stats);
6308         req->num_vnics = cpu_to_le16(vnics);
6309
6310         req->enables = cpu_to_le32(enables);
6311         return req;
6312 }
6313
6314 static int
6315 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6316                            int ring_grps, int cp_rings, int stats, int vnics)
6317 {
6318         struct hwrm_func_cfg_input *req;
6319         int rc;
6320
6321         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6322                                            cp_rings, stats, vnics);
6323         if (!req)
6324                 return -ENOMEM;
6325
6326         if (!req->enables) {
6327                 hwrm_req_drop(bp, req);
6328                 return 0;
6329         }
6330
6331         rc = hwrm_req_send(bp, req);
6332         if (rc)
6333                 return rc;
6334
6335         if (bp->hwrm_spec_code < 0x10601)
6336                 bp->hw_resc.resv_tx_rings = tx_rings;
6337
6338         return bnxt_hwrm_get_rings(bp);
6339 }
6340
6341 static int
6342 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6343                            int ring_grps, int cp_rings, int stats, int vnics)
6344 {
6345         struct hwrm_func_vf_cfg_input *req;
6346         int rc;
6347
6348         if (!BNXT_NEW_RM(bp)) {
6349                 bp->hw_resc.resv_tx_rings = tx_rings;
6350                 return 0;
6351         }
6352
6353         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6354                                            cp_rings, stats, vnics);
6355         if (!req)
6356                 return -ENOMEM;
6357
6358         rc = hwrm_req_send(bp, req);
6359         if (rc)
6360                 return rc;
6361
6362         return bnxt_hwrm_get_rings(bp);
6363 }
6364
6365 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6366                                    int cp, int stat, int vnic)
6367 {
6368         if (BNXT_PF(bp))
6369                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6370                                                   vnic);
6371         else
6372                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6373                                                   vnic);
6374 }
6375
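/* Count the NQs in use: the L2 rings plus any MSIX vectors claimed by
 * the ULP (RoCE) driver above the L2 range.
 */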
6376 int bnxt_nq_rings_in_use(struct bnxt *bp)
6377 {
6378         int cp = bp->cp_nr_rings;
6379         int ulp_msix, ulp_base;
6380
6381         ulp_msix = bnxt_get_ulp_msix_num(bp);
6382         if (ulp_msix) {
6383                 ulp_base = bnxt_get_ulp_msix_base(bp);
6384                 cp += ulp_msix;
6385                 if ((ulp_base + ulp_msix) > cp)
6386                         cp = ulp_base + ulp_msix;
6387         }
6388         return cp;
6389 }
6390
6391 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6392 {
6393         int cp;
6394
6395         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6396                 return bnxt_nq_rings_in_use(bp);
6397
6398         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6399         return cp;
6400 }
6401
6402 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6403 {
6404         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6405         int cp = bp->cp_nr_rings;
6406
6407         if (!ulp_stat)
6408                 return cp;
6409
6410         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6411                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6412
6413         return cp + ulp_stat;
6414 }
6415
6416 /* Check if a default RSS map needs to be set up.  This function is only
6417  * used on older firmware that does not require reserving RX rings.
6418  */
6419 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6420 {
6421         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6422
6423         /* The RSS map is valid only for the RX ring count in resv_rx_rings */
6424         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6425                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6426                 if (!netif_is_rxfh_configured(bp->dev))
6427                         bnxt_set_dflt_rss_indir_tbl(bp);
6428         }
6429 }
6430
6431 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6432 {
6433         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6434         int cp = bnxt_cp_rings_in_use(bp);
6435         int nq = bnxt_nq_rings_in_use(bp);
6436         int rx = bp->rx_nr_rings, stat;
6437         int vnic = 1, grp = rx;
6438
6439         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6440             bp->hwrm_spec_code >= 0x10601)
6441                 return true;
6442
6443         /* Old firmware does not need RX ring reservations, but we still
6444          * need to set up a default RSS map when required.  With new
6445          * firmware, we go through RX ring reservations first and then set
6446          * up the RSS map for the successfully reserved RX rings as needed.
6447          */
6448         if (!BNXT_NEW_RM(bp)) {
6449                 bnxt_check_rss_tbl_no_rmgr(bp);
6450                 return false;
6451         }
6452         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6453                 vnic = rx + 1;
6454         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6455                 rx <<= 1;
6456         stat = bnxt_get_func_stat_ctxs(bp);
6457         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6458             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6459             (hw_resc->resv_hw_ring_grps != grp &&
6460              !(bp->flags & BNXT_FLAG_CHIP_P5)))
6461                 return true;
6462         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6463             hw_resc->resv_irqs != nq)
6464                 return true;
6465         return false;
6466 }
6467
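/* Reserve rings with the firmware and adapt to what was granted: trim
 * the RX/TX/CP counts down to the reservations, drop aggregation rings
 * (and LRO) if fewer than two RX rings remain, and revert the RSS
 * indirection table to the default if the reserved RX count
 * invalidates a user-configured map.
 */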
6468 static int __bnxt_reserve_rings(struct bnxt *bp)
6469 {
6470         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6471         int cp = bnxt_nq_rings_in_use(bp);
6472         int tx = bp->tx_nr_rings;
6473         int rx = bp->rx_nr_rings;
6474         int grp, rx_rings, rc;
6475         int vnic = 1, stat;
6476         bool sh = false;
6477
6478         if (!bnxt_need_reserve_rings(bp))
6479                 return 0;
6480
6481         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6482                 sh = true;
6483         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6484                 vnic = rx + 1;
6485         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6486                 rx <<= 1;
6487         grp = bp->rx_nr_rings;
6488         stat = bnxt_get_func_stat_ctxs(bp);
6489
6490         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6491         if (rc)
6492                 return rc;
6493
6494         tx = hw_resc->resv_tx_rings;
6495         if (BNXT_NEW_RM(bp)) {
6496                 rx = hw_resc->resv_rx_rings;
6497                 cp = hw_resc->resv_irqs;
6498                 grp = hw_resc->resv_hw_ring_grps;
6499                 vnic = hw_resc->resv_vnics;
6500                 stat = hw_resc->resv_stat_ctxs;
6501         }
6502
6503         rx_rings = rx;
6504         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6505                 if (rx >= 2) {
6506                         rx_rings = rx >> 1;
6507                 } else {
6508                         if (netif_running(bp->dev))
6509                                 return -ENOMEM;
6510
6511                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6512                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6513                         bp->dev->hw_features &= ~NETIF_F_LRO;
6514                         bp->dev->features &= ~NETIF_F_LRO;
6515                         bnxt_set_ring_params(bp);
6516                 }
6517         }
6518         rx_rings = min_t(int, rx_rings, grp);
6519         cp = min_t(int, cp, bp->cp_nr_rings);
6520         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6521                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6522         cp = min_t(int, cp, stat);
6523         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6524         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6525                 rx = rx_rings << 1;
6526         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6527         bp->tx_nr_rings = tx;
6528
6529         /* If we cannot reserve all the RX rings, reset the RSS map only
6530          * if absolutely necessary.
6531          */
6532         if (rx_rings != bp->rx_nr_rings) {
6533                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6534                             rx_rings, bp->rx_nr_rings);
6535                 if (netif_is_rxfh_configured(bp->dev) &&
6536                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6537                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6538                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6539                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6540                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6541                 }
6542         }
6543         bp->rx_nr_rings = rx_rings;
6544         bp->cp_nr_rings = cp;
6545
6546         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6547                 return -ENOMEM;
6548
6549         if (!netif_is_rxfh_configured(bp->dev))
6550                 bnxt_set_dflt_rss_indir_tbl(bp);
6551
6552         return rc;
6553 }
6554
6555 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6556                                     int ring_grps, int cp_rings, int stats,
6557                                     int vnics)
6558 {
6559         struct hwrm_func_vf_cfg_input *req;
6560         u32 flags;
6561
6562         if (!BNXT_NEW_RM(bp))
6563                 return 0;
6564
6565         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6566                                            cp_rings, stats, vnics);
6567         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6568                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6569                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6570                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6571                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6572                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6573         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6574                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6575
6576         req->flags = cpu_to_le32(flags);
6577         return hwrm_req_send_silent(bp, req);
6578 }
6579
6580 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6581                                     int ring_grps, int cp_rings, int stats,
6582                                     int vnics)
6583 {
6584         struct hwrm_func_cfg_input *req;
6585         u32 flags;
6586
6587         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6588                                            cp_rings, stats, vnics);
6589         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6590         if (BNXT_NEW_RM(bp)) {
6591                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6592                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6593                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6594                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6595                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6596                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6597                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6598                 else
6599                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6600         }
6601
6602         req->flags = cpu_to_le32(flags);
6603         return hwrm_req_send_silent(bp, req);
6604 }
6605
6606 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6607                                  int ring_grps, int cp_rings, int stats,
6608                                  int vnics)
6609 {
6610         if (bp->hwrm_spec_code < 0x10801)
6611                 return 0;
6612
6613         if (BNXT_PF(bp))
6614                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6615                                                 ring_grps, cp_rings, stats,
6616                                                 vnics);
6617
6618         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6619                                         cp_rings, stats, vnics);
6620 }
6621
6622 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6623 {
6624         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6625         struct hwrm_ring_aggint_qcaps_output *resp;
6626         struct hwrm_ring_aggint_qcaps_input *req;
6627         int rc;
6628
6629         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6630         coal_cap->num_cmpl_dma_aggr_max = 63;
6631         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6632         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6633         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6634         coal_cap->int_lat_tmr_min_max = 65535;
6635         coal_cap->int_lat_tmr_max_max = 65535;
6636         coal_cap->num_cmpl_aggr_int_max = 65535;
6637         coal_cap->timer_units = 80;
6638
6639         if (bp->hwrm_spec_code < 0x10902)
6640                 return;
6641
6642         if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6643                 return;
6644
6645         resp = hwrm_req_hold(bp, req);
6646         rc = hwrm_req_send_silent(bp, req);
6647         if (!rc) {
6648                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6649                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6650                 coal_cap->num_cmpl_dma_aggr_max =
6651                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6652                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6653                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6654                 coal_cap->cmpl_aggr_dma_tmr_max =
6655                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6656                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6657                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6658                 coal_cap->int_lat_tmr_min_max =
6659                         le16_to_cpu(resp->int_lat_tmr_min_max);
6660                 coal_cap->int_lat_tmr_max_max =
6661                         le16_to_cpu(resp->int_lat_tmr_max_max);
6662                 coal_cap->num_cmpl_aggr_int_max =
6663                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6664                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6665         }
6666         hwrm_req_drop(bp, req);
6667 }
6668
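/* Convert microseconds to coalescing timer ticks.  With the default
 * timer_units of 80 (presumably nanoseconds per tick), 25 usec
 * converts to 25000 / 80 = 312 ticks after integer division.
 */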
6669 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6670 {
6671         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6672
6673         return usec * 1000 / coal_cap->timer_units;
6674 }
6675
6676 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6677         struct bnxt_coal *hw_coal,
6678         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6679 {
6680         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6681         u16 val, tmr, max, flags = hw_coal->flags;
6682         u32 cmpl_params = coal_cap->cmpl_params;
6683
6684         max = hw_coal->bufs_per_record * 128;
6685         if (hw_coal->budget)
6686                 max = hw_coal->bufs_per_record * hw_coal->budget;
6687         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6688
6689         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6690         req->num_cmpl_aggr_int = cpu_to_le16(val);
6691
6692         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6693         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6694
6695         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6696                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6697         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6698
6699         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6700         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6701         req->int_lat_tmr_max = cpu_to_le16(tmr);
6702
6703         /* min timer set to 1/2 of interrupt timer */
6704         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6705                 val = tmr / 2;
6706                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6707                 req->int_lat_tmr_min = cpu_to_le16(val);
6708                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6709         }
6710
6711         /* buf timer set to 1/4 of interrupt timer */
6712         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6713         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6714
6715         if (cmpl_params &
6716             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6717                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6718                 val = clamp_t(u16, tmr, 1,
6719                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6720                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6721                 req->enables |=
6722                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6723         }
6724
6725         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6726             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6727                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6728         req->flags = cpu_to_le16(flags);
6729         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6730 }
6731
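/* Set the interrupt coalescing parameters on an NQ (P5 chips).  Only
 * the minimum latency timer applies to an NQ; it is set to half the
 * ring's interrupt timer, matching the 1/2 rule used for completion
 * rings above.
 */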
6732 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6733                                    struct bnxt_coal *hw_coal)
6734 {
6735         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6736         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6737         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6738         u32 nq_params = coal_cap->nq_params;
6739         u16 tmr;
6740         int rc;
6741
6742         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6743                 return 0;
6744
6745         rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6746         if (rc)
6747                 return rc;
6748
6749         req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6750         req->flags =
6751                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6752
6753         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6754         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6755         req->int_lat_tmr_min = cpu_to_le16(tmr);
6756         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6757         return hwrm_req_send(bp, req);
6758 }
6759
6760 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6761 {
6762         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6763         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6764         struct bnxt_coal coal;
6765         int rc;
6766
6767         /* Tick values are in microseconds.
6768          * 1 coal_buf x bufs_per_record = 1 completion record.
6769          */
6770         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6771
6772         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6773         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6774
6775         if (!bnapi->rx_ring)
6776                 return -ENODEV;
6777
6778         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6779         if (rc)
6780                 return rc;
6781
6782         bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6783
6784         req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6785
6786         return hwrm_req_send(bp, req_rx);
6787 }
6788
6789 int bnxt_hwrm_set_coal(struct bnxt *bp)
6790 {
6791         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6792                                                            *req;
6793         int i, rc;
6794
6795         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6796         if (rc)
6797                 return rc;
6798
6799         rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6800         if (rc) {
6801                 hwrm_req_drop(bp, req_rx);
6802                 return rc;
6803         }
6804
6805         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6806         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6807
6808         hwrm_req_hold(bp, req_rx);
6809         hwrm_req_hold(bp, req_tx);
6810         for (i = 0; i < bp->cp_nr_rings; i++) {
6811                 struct bnxt_napi *bnapi = bp->bnapi[i];
6812                 struct bnxt_coal *hw_coal;
6813                 u16 ring_id;
6814
6815                 req = req_rx;
6816                 if (!bnapi->rx_ring) {
6817                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6818                         req = req_tx;
6819                 } else {
6820                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6821                 }
6822                 req->ring_id = cpu_to_le16(ring_id);
6823
6824                 rc = hwrm_req_send(bp, req);
6825                 if (rc)
6826                         break;
6827
6828                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6829                         continue;
6830
6831                 if (bnapi->rx_ring && bnapi->tx_ring) {
6832                         req = req_tx;
6833                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6834                         req->ring_id = cpu_to_le16(ring_id);
6835                         rc = hwrm_req_send(bp, req);
6836                         if (rc)
6837                                 break;
6838                 }
6839                 if (bnapi->rx_ring)
6840                         hw_coal = &bp->rx_coal;
6841                 else
6842                         hw_coal = &bp->tx_coal;
6843                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6844         }
6845         hwrm_req_drop(bp, req_rx);
6846         hwrm_req_drop(bp, req_tx);
6847         return rc;
6848 }
6849
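/* Firmware with major version <= 20 needs an explicit
 * HWRM_STAT_CTX_CLR_STATS before HWRM_STAT_CTX_FREE, so the function
 * below sends the clear for each context first.  On newer firmware
 * req0 stays NULL and the extra send is skipped.
 */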
6850 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6851 {
6852         struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6853         struct hwrm_stat_ctx_free_input *req;
6854         int i;
6855
6856         if (!bp->bnapi)
6857                 return;
6858
6859         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6860                 return;
6861
6862         if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6863                 return;
6864         if (BNXT_FW_MAJ(bp) <= 20) {
6865                 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6866                         hwrm_req_drop(bp, req);
6867                         return;
6868                 }
6869                 hwrm_req_hold(bp, req0);
6870         }
6871         hwrm_req_hold(bp, req);
6872         for (i = 0; i < bp->cp_nr_rings; i++) {
6873                 struct bnxt_napi *bnapi = bp->bnapi[i];
6874                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6875
6876                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6877                         req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6878                         if (req0) {
6879                                 req0->stat_ctx_id = req->stat_ctx_id;
6880                                 hwrm_req_send(bp, req0);
6881                         }
6882                         hwrm_req_send(bp, req);
6883
6884                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6885                 }
6886         }
6887         hwrm_req_drop(bp, req);
6888         if (req0)
6889                 hwrm_req_drop(bp, req0);
6890 }
6891
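/* Firmware DMAs each ring's counters into the stats buffer every
 * update_period_ms.  bp->stats_coal_ticks is kept in microseconds,
 * hence the division by 1000 below: e.g. a 1000000 usec setting
 * becomes update_period_ms == 1000, one update per second.
 */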
6892 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6893 {
6894         struct hwrm_stat_ctx_alloc_output *resp;
6895         struct hwrm_stat_ctx_alloc_input *req;
6896         int rc, i;
6897
6898         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6899                 return 0;
6900
6901         rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6902         if (rc)
6903                 return rc;
6904
6905         req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6906         req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6907
6908         resp = hwrm_req_hold(bp, req);
6909         for (i = 0; i < bp->cp_nr_rings; i++) {
6910                 struct bnxt_napi *bnapi = bp->bnapi[i];
6911                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6912
6913                 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6914
6915                 rc = hwrm_req_send(bp, req);
6916                 if (rc)
6917                         break;
6918
6919                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6920
6921                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6922         }
6923         hwrm_req_drop(bp, req);
6924         return rc;
6925 }
6926
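/* bnxt_hwrm_func_qcfg() below also derives the usable doorbell size,
 * roughly:
 *
 *	db_size = PAGE_ALIGN(l2_doorbell_bar_size_kb * 1024);
 *	if (!db_size || db_size > pci_resource_len(pdev, 2) ||
 *	    db_size <= min_db_offset)
 *		db_size = pci_resource_len(pdev, 2);
 *
 * i.e. a missing or implausible firmware value falls back to the full
 * BAR 2 length, with the minimum offset raised on P5 chips where the
 * PF/VF doorbell regions start at fixed offsets.
 */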
6927 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6928 {
6929         struct hwrm_func_qcfg_output *resp;
6930         struct hwrm_func_qcfg_input *req;
6931         u32 min_db_offset = 0;
6932         u16 flags;
6933         int rc;
6934
6935         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6936         if (rc)
6937                 return rc;
6938
6939         req->fid = cpu_to_le16(0xffff);
6940         resp = hwrm_req_hold(bp, req);
6941         rc = hwrm_req_send(bp, req);
6942         if (rc)
6943                 goto func_qcfg_exit;
6944
6945 #ifdef CONFIG_BNXT_SRIOV
6946         if (BNXT_VF(bp)) {
6947                 struct bnxt_vf_info *vf = &bp->vf;
6948
6949                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6950         } else {
6951                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6952         }
6953 #endif
6954         flags = le16_to_cpu(resp->flags);
6955         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6956                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6957                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6958                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6959                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6960         }
6961         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6962                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6963         if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6964                 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6965
6966         switch (resp->port_partition_type) {
6967         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6968         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6969         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6970                 bp->port_partition_type = resp->port_partition_type;
6971                 break;
6972         }
6973         if (bp->hwrm_spec_code < 0x10707 ||
6974             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6975                 bp->br_mode = BRIDGE_MODE_VEB;
6976         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6977                 bp->br_mode = BRIDGE_MODE_VEPA;
6978         else
6979                 bp->br_mode = BRIDGE_MODE_UNDEF;
6980
6981         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6982         if (!bp->max_mtu)
6983                 bp->max_mtu = BNXT_MAX_MTU;
6984
6985         if (bp->db_size)
6986                 goto func_qcfg_exit;
6987
6988         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6989                 if (BNXT_PF(bp))
6990                         min_db_offset = DB_PF_OFFSET_P5;
6991                 else
6992                         min_db_offset = DB_VF_OFFSET_P5;
6993         }
6994         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6995                                  1024);
6996         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6997             bp->db_size <= min_db_offset)
6998                 bp->db_size = pci_resource_len(bp->pdev, 2);
6999
7000 func_qcfg_exit:
7001         hwrm_req_drop(bp, req);
7002         return rc;
7003 }
7004
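/* Context-memory initializer parsing: the qcaps response carries a
 * validity bitmask plus one byte-sized offset per context type,
 * expressed in 4-byte units (hence "*offset * 4" below).  A clear bit
 * means the type is not initialized and its init_val is forced to 0.
 * The offset fields are consecutive except for STAT, so the walk
 * re-aims the pointer at stat_init_offset when it reaches that entry.
 */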
7005 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
7006                         struct hwrm_func_backing_store_qcaps_output *resp)
7007 {
7008         struct bnxt_mem_init *mem_init;
7009         u16 init_mask;
7010         u8 init_val;
7011         u8 *offset;
7012         int i;
7013
7014         init_val = resp->ctx_kind_initializer;
7015         init_mask = le16_to_cpu(resp->ctx_init_mask);
7016         offset = &resp->qp_init_offset;
7017         mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7018         for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
7019                 mem_init->init_val = init_val;
7020                 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
7021                 if (!init_mask)
7022                         continue;
7023                 if (i == BNXT_CTX_MEM_INIT_STAT)
7024                         offset = &resp->stat_init_offset;
7025                 if (init_mask & (1 << i))
7026                         mem_init->offset = *offset * 4;
7027                 else
7028                         mem_init->init_val = 0;
7029         }
7030         ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
7031         ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
7032         ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
7033         ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
7034         ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
7035         ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
7036 }
7037
7038 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7039 {
7040         struct hwrm_func_backing_store_qcaps_output *resp;
7041         struct hwrm_func_backing_store_qcaps_input *req;
7042         int rc;
7043
7044         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7045                 return 0;
7046
7047         rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
7048         if (rc)
7049                 return rc;
7050
7051         resp = hwrm_req_hold(bp, req);
7052         rc = hwrm_req_send_silent(bp, req);
7053         if (!rc) {
7054                 struct bnxt_ctx_pg_info *ctx_pg;
7055                 struct bnxt_ctx_mem_info *ctx;
7056                 int i, tqm_rings;
7057
7058                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7059                 if (!ctx) {
7060                         rc = -ENOMEM;
7061                         goto ctx_err;
7062                 }
7063                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
7064                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
7065                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7066                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
7067                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
7068                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
7069                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
7070                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
7071                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
7072                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
7073                 ctx->vnic_max_vnic_entries =
7074                         le16_to_cpu(resp->vnic_max_vnic_entries);
7075                 ctx->vnic_max_ring_table_entries =
7076                         le16_to_cpu(resp->vnic_max_ring_table_entries);
7077                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
7078                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
7079                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
7080                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
7081                 ctx->tqm_min_entries_per_ring =
7082                         le32_to_cpu(resp->tqm_min_entries_per_ring);
7083                 ctx->tqm_max_entries_per_ring =
7084                         le32_to_cpu(resp->tqm_max_entries_per_ring);
7085                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
7086                 if (!ctx->tqm_entries_multiple)
7087                         ctx->tqm_entries_multiple = 1;
7088                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
7089                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
7090                 ctx->mrav_num_entries_units =
7091                         le16_to_cpu(resp->mrav_num_entries_units);
7092                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
7093                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
7094
7095                 bnxt_init_ctx_initializer(ctx, resp);
7096
7097                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7098                 if (!ctx->tqm_fp_rings_count)
7099                         ctx->tqm_fp_rings_count = bp->max_q;
7100                 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7101                         ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
7102
7103                 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
7104                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
7105                 if (!ctx_pg) {
7106                         kfree(ctx);
7107                         rc = -ENOMEM;
7108                         goto ctx_err;
7109                 }
7110                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
7111                         ctx->tqm_mem[i] = ctx_pg;
7112                 bp->ctx = ctx;
7113         } else {
7114                 rc = 0;
7115         }
7116 ctx_err:
7117         hwrm_req_drop(bp, req);
7118         return rc;
7119 }
7120
7121 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7122                                   __le64 *pg_dir)
7123 {
7124         if (!rmem->nr_pages)
7125                 return;
7126
7127         BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
7128         if (rmem->depth >= 1) {
7129                 if (rmem->depth == 2)
7130                         *pg_attr |= 2;
7131                 else
7132                         *pg_attr |= 1;
7133                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7134         } else {
7135                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7136         }
7137 }
7138
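/* bnxt_hwrm_set_pg_attr() above encodes the paging level in the low
 * bits of the page attribute byte: 0 means the single data page is
 * addressed directly, 1 or 2 select one or two levels of page tables,
 * in which case the page directory points at the table(s) rather than
 * the data page.
 */
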
7139 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
7140         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
7141          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
7142          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
7143          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
7144          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7145
7146 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7147 {
7148         struct hwrm_func_backing_store_cfg_input *req;
7149         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7150         struct bnxt_ctx_pg_info *ctx_pg;
7151         void **__req = (void **)&req;
7152         u32 req_len = sizeof(*req);
7153         __le32 *num_entries;
7154         __le64 *pg_dir;
7155         u32 flags = 0;
7156         u8 *pg_attr;
7157         u32 ena;
7158         int rc;
7159         int i;
7160
7161         if (!ctx)
7162                 return 0;
7163
7164         if (req_len > bp->hwrm_max_ext_req_len)
7165                 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7166         rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7167         if (rc)
7168                 return rc;
7169
7170         req->enables = cpu_to_le32(enables);
7171         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7172                 ctx_pg = &ctx->qp_mem;
7173                 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7174                 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7175                 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7176                 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7177                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7178                                       &req->qpc_pg_size_qpc_lvl,
7179                                       &req->qpc_page_dir);
7180         }
7181         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7182                 ctx_pg = &ctx->srq_mem;
7183                 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7184                 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7185                 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7186                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7187                                       &req->srq_pg_size_srq_lvl,
7188                                       &req->srq_page_dir);
7189         }
7190         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7191                 ctx_pg = &ctx->cq_mem;
7192                 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7193                 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7194                 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7195                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7196                                       &req->cq_pg_size_cq_lvl,
7197                                       &req->cq_page_dir);
7198         }
7199         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7200                 ctx_pg = &ctx->vnic_mem;
7201                 req->vnic_num_vnic_entries =
7202                         cpu_to_le16(ctx->vnic_max_vnic_entries);
7203                 req->vnic_num_ring_table_entries =
7204                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
7205                 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7206                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7207                                       &req->vnic_pg_size_vnic_lvl,
7208                                       &req->vnic_page_dir);
7209         }
7210         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7211                 ctx_pg = &ctx->stat_mem;
7212                 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7213                 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7214                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7215                                       &req->stat_pg_size_stat_lvl,
7216                                       &req->stat_page_dir);
7217         }
7218         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7219                 ctx_pg = &ctx->mrav_mem;
7220                 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7221                 if (ctx->mrav_num_entries_units)
7222                         flags |=
7223                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7224                 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7225                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7226                                       &req->mrav_pg_size_mrav_lvl,
7227                                       &req->mrav_page_dir);
7228         }
7229         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7230                 ctx_pg = &ctx->tim_mem;
7231                 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7232                 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7233                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7234                                       &req->tim_pg_size_tim_lvl,
7235                                       &req->tim_page_dir);
7236         }
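        /* The TQM fields sit at consecutive offsets in the request
         * (tqm_sp_*, then the per-ring tqm_ring*_* fields), so the
         * num_entries/pg_attr/pg_dir pointers are stepped in lockstep,
         * one ring per iteration, while ena shifts the enable bit left
         * by one ring each time around.
         */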
7237         for (i = 0, num_entries = &req->tqm_sp_num_entries,
7238              pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7239              pg_dir = &req->tqm_sp_page_dir,
7240              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7241              i < BNXT_MAX_TQM_RINGS;
7242              i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7243                 if (!(enables & ena))
7244                         continue;
7245
7246                 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7247                 ctx_pg = ctx->tqm_mem[i];
7248                 *num_entries = cpu_to_le32(ctx_pg->entries);
7249                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7250         }
7251         req->flags = cpu_to_le32(flags);
7252         return hwrm_req_send(bp, req);
7253 }
7254
7255 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7256                                   struct bnxt_ctx_pg_info *ctx_pg)
7257 {
7258         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7259
7260         rmem->page_size = BNXT_PAGE_SIZE;
7261         rmem->pg_arr = ctx_pg->ctx_pg_arr;
7262         rmem->dma_arr = ctx_pg->ctx_dma_arr;
7263         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7264         if (rmem->depth >= 1)
7265                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7266         return bnxt_alloc_ring(bp, rmem);
7267 }
7268
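/* bnxt_alloc_ctx_pg_tbls() builds a one- or two-level page structure
 * for a context region.  Rough capacity, assuming 4K BNXT_PAGE_SIZE and
 * 8-byte PTEs (MAX_CTX_PAGES == BNXT_PAGE_SIZE / 8 == 512):
 *
 *	depth 0: 1 direct page            ->   4 KB
 *	depth 1: up to 512 pages          ->   2 MB
 *	depth 2: up to 512 * 512 pages    ->   1 GB (MAX_CTX_TOTAL_PAGES)
 *
 * Requests above MAX_CTX_TOTAL_PAGES are rejected with -EINVAL.
 */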
7269 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7270                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7271                                   u8 depth, struct bnxt_mem_init *mem_init)
7272 {
7273         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7274         int rc;
7275
7276         if (!mem_size)
7277                 return -EINVAL;
7278
7279         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7280         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7281                 ctx_pg->nr_pages = 0;
7282                 return -EINVAL;
7283         }
7284         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7285                 int nr_tbls, i;
7286
7287                 rmem->depth = 2;
7288                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7289                                              GFP_KERNEL);
7290                 if (!ctx_pg->ctx_pg_tbl)
7291                         return -ENOMEM;
7292                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7293                 rmem->nr_pages = nr_tbls;
7294                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7295                 if (rc)
7296                         return rc;
7297                 for (i = 0; i < nr_tbls; i++) {
7298                         struct bnxt_ctx_pg_info *pg_tbl;
7299
7300                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7301                         if (!pg_tbl)
7302                                 return -ENOMEM;
7303                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7304                         rmem = &pg_tbl->ring_mem;
7305                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7306                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7307                         rmem->depth = 1;
7308                         rmem->nr_pages = MAX_CTX_PAGES;
7309                         rmem->mem_init = mem_init;
7310                         if (i == (nr_tbls - 1)) {
7311                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7312
7313                                 if (rem)
7314                                         rmem->nr_pages = rem;
7315                         }
7316                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7317                         if (rc)
7318                                 break;
7319                 }
7320         } else {
7321                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7322                 if (rmem->nr_pages > 1 || depth)
7323                         rmem->depth = 1;
7324                 rmem->mem_init = mem_init;
7325                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7326         }
7327         return rc;
7328 }
7329
7330 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7331                                   struct bnxt_ctx_pg_info *ctx_pg)
7332 {
7333         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7334
7335         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7336             ctx_pg->ctx_pg_tbl) {
7337                 int i, nr_tbls = rmem->nr_pages;
7338
7339                 for (i = 0; i < nr_tbls; i++) {
7340                         struct bnxt_ctx_pg_info *pg_tbl;
7341                         struct bnxt_ring_mem_info *rmem2;
7342
7343                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
7344                         if (!pg_tbl)
7345                                 continue;
7346                         rmem2 = &pg_tbl->ring_mem;
7347                         bnxt_free_ring(bp, rmem2);
7348                         ctx_pg->ctx_pg_arr[i] = NULL;
7349                         kfree(pg_tbl);
7350                         ctx_pg->ctx_pg_tbl[i] = NULL;
7351                 }
7352                 kfree(ctx_pg->ctx_pg_tbl);
7353                 ctx_pg->ctx_pg_tbl = NULL;
7354         }
7355         bnxt_free_ring(bp, rmem);
7356         ctx_pg->nr_pages = 0;
7357 }
7358
7359 void bnxt_free_ctx_mem(struct bnxt *bp)
7360 {
7361         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7362         int i;
7363
7364         if (!ctx)
7365                 return;
7366
7367         if (ctx->tqm_mem[0]) {
7368                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7369                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7370                 kfree(ctx->tqm_mem[0]);
7371                 ctx->tqm_mem[0] = NULL;
7372         }
7373
7374         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7375         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7376         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7377         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7378         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7379         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7380         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7381         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7382 }
7383
7384 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7385 {
7386         struct bnxt_ctx_pg_info *ctx_pg;
7387         struct bnxt_ctx_mem_info *ctx;
7388         struct bnxt_mem_init *init;
7389         u32 mem_size, ena, entries;
7390         u32 entries_sp, min;
7391         u32 num_mr, num_ah;
7392         u32 extra_srqs = 0;
7393         u32 extra_qps = 0;
7394         u8 pg_lvl = 1;
7395         int i, rc;
7396
7397         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7398         if (rc) {
7399                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7400                            rc);
7401                 return rc;
7402         }
7403         ctx = bp->ctx;
7404         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7405                 return 0;
7406
7407         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7408                 pg_lvl = 2;
7409                 extra_qps = 65536;
7410                 extra_srqs = 8192;
7411         }
7412
7413         ctx_pg = &ctx->qp_mem;
7414         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7415                           extra_qps;
7416         if (ctx->qp_entry_size) {
7417                 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7418                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7419                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7420                 if (rc)
7421                         return rc;
7422         }
7423
7424         ctx_pg = &ctx->srq_mem;
7425         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7426         if (ctx->srq_entry_size) {
7427                 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7428                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7429                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7430                 if (rc)
7431                         return rc;
7432         }
7433
7434         ctx_pg = &ctx->cq_mem;
7435         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7436         if (ctx->cq_entry_size) {
7437                 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7438                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7439                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7440                 if (rc)
7441                         return rc;
7442         }
7443
7444         ctx_pg = &ctx->vnic_mem;
7445         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7446                           ctx->vnic_max_ring_table_entries;
7447         if (ctx->vnic_entry_size) {
7448                 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7449                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7450                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7451                 if (rc)
7452                         return rc;
7453         }
7454
7455         ctx_pg = &ctx->stat_mem;
7456         ctx_pg->entries = ctx->stat_max_entries;
7457         if (ctx->stat_entry_size) {
7458                 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7459                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7460                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7461                 if (rc)
7462                         return rc;
7463         }
7464
7465         ena = 0;
7466         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7467                 goto skip_rdma;
7468
7469         ctx_pg = &ctx->mrav_mem;
7470         /* 128K extra is needed to accommodate static AH context
7471          * allocation by f/w.
7472          */
7473         num_mr = 1024 * 256;
7474         num_ah = 1024 * 128;
7475         ctx_pg->entries = num_mr + num_ah;
7476         if (ctx->mrav_entry_size) {
7477                 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7478                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7479                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7480                 if (rc)
7481                         return rc;
7482         }
7483         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
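        /* With mrav_num_entries_units set, the entry count below is a
         * split encoding: MR units in the high 16 bits, AH units in the
         * low 16.  E.g. (illustrative) units == 128 with num_mr == 256K
         * and num_ah == 128K gives (2048 << 16) | 1024.
         */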
7484         if (ctx->mrav_num_entries_units)
7485                 ctx_pg->entries =
7486                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7487                          (num_ah / ctx->mrav_num_entries_units);
7488
7489         ctx_pg = &ctx->tim_mem;
7490         ctx_pg->entries = ctx->qp_mem.entries;
7491         if (ctx->tim_entry_size) {
7492                 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7493                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7494                 if (rc)
7495                         return rc;
7496         }
7497         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7498
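/* TQM ring sizing below: the slow-path ring (tqm_mem[0]) gets
 * entries_sp (VNIC entries plus L2 and QP1 QPs), the fast-path rings
 * get entries (L2 and QP1 QPs only).  Both are rounded up to
 * tqm_entries_multiple, and the fast-path count is clamped to the
 * firmware's per-ring [min, max].  Illustrative numbers:
 * qp_max_l2_entries == 4096, extra_qps == 0, qp_min_qp1_entries == 64,
 * multiple == 64 -> entries == roundup(4096 + 2 * 64, 64) == 4224.
 */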
7499 skip_rdma:
7500         min = ctx->tqm_min_entries_per_ring;
7501         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7502                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7503         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7504         entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7505         entries = roundup(entries, ctx->tqm_entries_multiple);
7506         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7507         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7508                 ctx_pg = ctx->tqm_mem[i];
7509                 ctx_pg->entries = i ? entries : entries_sp;
7510                 if (ctx->tqm_entry_size) {
7511                         mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7512                         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7513                                                     NULL);
7514                         if (rc)
7515                                 return rc;
7516                 }
7517                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7518         }
7519         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7520         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7521         if (rc) {
7522                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7523                            rc);
7524                 return rc;
7525         }
7526         ctx->flags |= BNXT_CTX_FLAG_INITED;
7527         return 0;
7528 }
7529
7530 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7531 {
7532         struct hwrm_func_resource_qcaps_output *resp;
7533         struct hwrm_func_resource_qcaps_input *req;
7534         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7535         int rc;
7536
7537         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7538         if (rc)
7539                 return rc;
7540
7541         req->fid = cpu_to_le16(0xffff);
7542         resp = hwrm_req_hold(bp, req);
7543         rc = hwrm_req_send_silent(bp, req);
7544         if (rc)
7545                 goto hwrm_func_resc_qcaps_exit;
7546
7547         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7548         if (!all)
7549                 goto hwrm_func_resc_qcaps_exit;
7550
7551         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7552         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7553         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7554         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7555         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7556         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7557         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7558         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7559         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7560         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7561         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7562         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7563         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7564         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7565         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7566         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7567
7568         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7569                 u16 max_msix = le16_to_cpu(resp->max_msix);
7570
7571                 hw_resc->max_nqs = max_msix;
7572                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7573         }
7574
7575         if (BNXT_PF(bp)) {
7576                 struct bnxt_pf_info *pf = &bp->pf;
7577
7578                 pf->vf_resv_strategy =
7579                         le16_to_cpu(resp->vf_reservation_strategy);
7580                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7581                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7582         }
7583 hwrm_func_resc_qcaps_exit:
7584         hwrm_req_drop(bp, req);
7585         return rc;
7586 }
7587
7588 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7589 {
7590         struct hwrm_port_mac_ptp_qcfg_output *resp;
7591         struct hwrm_port_mac_ptp_qcfg_input *req;
7592         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7593         bool phc_cfg;
7594         u8 flags;
7595         int rc;
7596
7597         if (bp->hwrm_spec_code < 0x10801) {
7598                 rc = -ENODEV;
7599                 goto no_ptp;
7600         }
7601
7602         rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7603         if (rc)
7604                 goto no_ptp;
7605
7606         req->port_id = cpu_to_le16(bp->pf.port_id);
7607         resp = hwrm_req_hold(bp, req);
7608         rc = hwrm_req_send(bp, req);
7609         if (rc)
7610                 goto exit;
7611
7612         flags = resp->flags;
7613         if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7614                 rc = -ENODEV;
7615                 goto exit;
7616         }
7617         if (!ptp) {
7618                 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7619                 if (!ptp) {
7620                         rc = -ENOMEM;
7621                         goto exit;
7622                 }
7623                 ptp->bp = bp;
7624                 bp->ptp_cfg = ptp;
7625         }
7626         if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7627                 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7628                 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7629         } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7630                 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7631                 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7632         } else {
7633                 rc = -ENODEV;
7634                 goto exit;
7635         }
7636         phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
7637         rc = bnxt_ptp_init(bp, phc_cfg);
7638         if (rc)
7639                 netdev_warn(bp->dev, "PTP initialization failed.\n");
7640 exit:
7641         hwrm_req_drop(bp, req);
7642         if (!rc)
7643                 return 0;
7644
7645 no_ptp:
7646         bnxt_ptp_clear(bp);
7647         kfree(ptp);
7648         bp->ptp_cfg = NULL;
7649         return rc;
7650 }
7651
7652 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7653 {
7654         struct hwrm_func_qcaps_output *resp;
7655         struct hwrm_func_qcaps_input *req;
7656         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7657         u32 flags, flags_ext, flags_ext2;
7658         int rc;
7659
7660         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7661         if (rc)
7662                 return rc;
7663
7664         req->fid = cpu_to_le16(0xffff);
7665         resp = hwrm_req_hold(bp, req);
7666         rc = hwrm_req_send(bp, req);
7667         if (rc)
7668                 goto hwrm_func_qcaps_exit;
7669
7670         flags = le32_to_cpu(resp->flags);
7671         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7672                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7673         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7674                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7675         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7676                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7677         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7678                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7679         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7680                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7681         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7682                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7683         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7684                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7685         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7686                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7687         if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
7688                 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
7689
7690         flags_ext = le32_to_cpu(resp->flags_ext);
7691         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7692                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7693         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7694                 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7695         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
7696                 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
7697         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7698                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
7699         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7700                 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
7701
7702         flags_ext2 = le32_to_cpu(resp->flags_ext2);
7703         if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
7704                 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
7705
7706         bp->tx_push_thresh = 0;
7707         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7708             BNXT_FW_MAJ(bp) > 217)
7709                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7710
7711         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7712         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7713         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7714         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7715         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7716         if (!hw_resc->max_hw_ring_grps)
7717                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7718         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7719         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7720         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7721
7722         if (BNXT_PF(bp)) {
7723                 struct bnxt_pf_info *pf = &bp->pf;
7724
7725                 pf->fw_fid = le16_to_cpu(resp->fid);
7726                 pf->port_id = le16_to_cpu(resp->port_id);
7727                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7728                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7729                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7730                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7731                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7732                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7733                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7734                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7735                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7736                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7737                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7738                         bp->flags |= BNXT_FLAG_WOL_CAP;
7739                 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7740                         __bnxt_hwrm_ptp_qcfg(bp);
7741                 } else {
7742                         bnxt_ptp_clear(bp);
7743                         kfree(bp->ptp_cfg);
7744                         bp->ptp_cfg = NULL;
7745                 }
7746         } else {
7747 #ifdef CONFIG_BNXT_SRIOV
7748                 struct bnxt_vf_info *vf = &bp->vf;
7749
7750                 vf->fw_fid = le16_to_cpu(resp->fid);
7751                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7752 #endif
7753         }
7754
7755 hwrm_func_qcaps_exit:
7756         hwrm_req_drop(bp, req);
7757         return rc;
7758 }
7759
7760 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7761 {
7762         struct hwrm_dbg_qcaps_output *resp;
7763         struct hwrm_dbg_qcaps_input *req;
7764         int rc;
7765
7766         bp->fw_dbg_cap = 0;
7767         if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
7768                 return;
7769
7770         rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7771         if (rc)
7772                 return;
7773
7774         req->fid = cpu_to_le16(0xffff);
7775         resp = hwrm_req_hold(bp, req);
7776         rc = hwrm_req_send(bp, req);
7777         if (rc)
7778                 goto hwrm_dbg_qcaps_exit;
7779
7780         bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7781
7782 hwrm_dbg_qcaps_exit:
7783         hwrm_req_drop(bp, req);
7784 }
7785
7786 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7787
7788 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7789 {
7790         int rc;
7791
7792         rc = __bnxt_hwrm_func_qcaps(bp);
7793         if (rc)
7794                 return rc;
7795
7796         bnxt_hwrm_dbg_qcaps(bp);
7797
7798         rc = bnxt_hwrm_queue_qportcfg(bp);
7799         if (rc) {
7800                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7801                 return rc;
7802         }
7803         if (bp->hwrm_spec_code >= 0x10803) {
7804                 rc = bnxt_alloc_ctx_mem(bp);
7805                 if (rc)
7806                         return rc;
7807                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7808                 if (!rc)
7809                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7810         }
7811         return 0;
7812 }
7813
7814 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7815 {
7816         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7817         struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7818         u32 flags;
7819         int rc;
7820
7821         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7822                 return 0;
7823
7824         rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7825         if (rc)
7826                 return rc;
7827
7828         resp = hwrm_req_hold(bp, req);
7829         rc = hwrm_req_send(bp, req);
7830         if (rc)
7831                 goto hwrm_cfa_adv_qcaps_exit;
7832
7833         flags = le32_to_cpu(resp->flags);
7834         if (flags &
7835             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7836                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7837
7838 hwrm_cfa_adv_qcaps_exit:
7839         hwrm_req_drop(bp, req);
7840         return rc;
7841 }
7842
7843 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7844 {
7845         if (bp->fw_health)
7846                 return 0;
7847
7848         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7849         if (!bp->fw_health)
7850                 return -ENOMEM;
7851
7852         mutex_init(&bp->fw_health->lock);
7853         return 0;
7854 }
7855
7856 static int bnxt_alloc_fw_health(struct bnxt *bp)
7857 {
7858         int rc;
7859
7860         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7861             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7862                 return 0;
7863
7864         rc = __bnxt_alloc_fw_health(bp);
7865         if (rc) {
7866                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7867                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7868                 return rc;
7869         }
7870
7871         return 0;
7872 }
7873
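/* GRC window mapping: writing the page-aligned base of a GRC address
 * (reg & BNXT_GRC_BASE_MASK) to the window-base register makes that
 * page readable through a fixed window in BAR 0, after which the
 * register is accessed at BNXT_FW_HEALTH_WIN_OFF(reg), i.e. the window
 * offset plus the register's offset within its page.
 */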
7874 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7875 {
7876         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7877                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7878                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7879 }
7880
7881 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7882 {
7883         struct bnxt_fw_health *fw_health = bp->fw_health;
7884         u32 reg_type;
7885
7886         if (!fw_health)
7887                 return;
7888
7889         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7890         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7891                 fw_health->status_reliable = false;
7892
7893         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
7894         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7895                 fw_health->resets_reliable = false;
7896 }
7897
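/* Firmware status discovery, roughly: map the hcomm_status structure
 * at HCOMM_STATUS_STRUCT_LOC and check its signature; if it is valid,
 * read the status register location from it.  Otherwise fall back to
 * the fixed P5 GRC status register; non-P5 chips without the structure
 * are left with status_reliable == false.
 */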
7898 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7899 {
7900         void __iomem *hs;
7901         u32 status_loc;
7902         u32 reg_type;
7903         u32 sig;
7904
7905         if (bp->fw_health)
7906                 bp->fw_health->status_reliable = false;
7907
7908         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7909         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7910
7911         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7912         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7913                 if (!bp->chip_num) {
7914                         __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7915                         bp->chip_num = readl(bp->bar0 +
7916                                              BNXT_FW_HEALTH_WIN_BASE +
7917                                              BNXT_GRC_REG_CHIP_NUM);
7918                 }
7919                 if (!BNXT_CHIP_P5(bp))
7920                         return;
7921
7922                 status_loc = BNXT_GRC_REG_STATUS_P5 |
7923                              BNXT_FW_HEALTH_REG_TYPE_BAR0;
7924         } else {
7925                 status_loc = readl(hs + offsetof(struct hcomm_status,
7926                                                  fw_status_loc));
7927         }
7928
7929         if (__bnxt_alloc_fw_health(bp)) {
7930                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7931                 return;
7932         }
7933
7934         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7935         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7936         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7937                 __bnxt_map_fw_health_reg(bp, status_loc);
7938                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7939                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
7940         }
7941
7942         bp->fw_health->status_reliable = true;
7943 }
7944
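/* All monitored GRC health registers must share a single page base,
 * because only one window (window 3, per the comment below) is
 * pre-mapped for polling; a register outside that base fails with
 * -ERANGE and the caller then disables error recovery.
 */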
7945 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7946 {
7947         struct bnxt_fw_health *fw_health = bp->fw_health;
7948         u32 reg_base = 0xffffffff;
7949         int i;
7950
7951         bp->fw_health->status_reliable = false;
7952         bp->fw_health->resets_reliable = false;
7953         /* Only pre-map the monitoring GRC registers using window 3 */
7954         for (i = 0; i < 4; i++) {
7955                 u32 reg = fw_health->regs[i];
7956
7957                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7958                         continue;
7959                 if (reg_base == 0xffffffff)
7960                         reg_base = reg & BNXT_GRC_BASE_MASK;
7961                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7962                         return -ERANGE;
7963                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7964         }
7965         bp->fw_health->status_reliable = true;
7966         bp->fw_health->resets_reliable = true;
7967         if (reg_base == 0xffffffff)
7968                 return 0;
7969
7970         __bnxt_map_fw_health_reg(bp, reg_base);
7971         return 0;
7972 }
7973
7974 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
7975 {
7976         if (!bp->fw_health)
7977                 return;
7978
7979         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
7980                 bp->fw_health->status_reliable = true;
7981                 bp->fw_health->resets_reliable = true;
7982         } else {
7983                 bnxt_try_map_fw_health_reg(bp);
7984         }
7985 }
7986
7987 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7988 {
7989         struct bnxt_fw_health *fw_health = bp->fw_health;
7990         struct hwrm_error_recovery_qcfg_output *resp;
7991         struct hwrm_error_recovery_qcfg_input *req;
7992         int rc, i;
7993
7994         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7995                 return 0;
7996
7997         rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7998         if (rc)
7999                 return rc;
8000
8001         resp = hwrm_req_hold(bp, req);
8002         rc = hwrm_req_send(bp, req);
8003         if (rc)
8004                 goto err_recovery_out;
8005         fw_health->flags = le32_to_cpu(resp->flags);
8006         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
8007             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
8008                 rc = -EINVAL;
8009                 goto err_recovery_out;
8010         }
8011         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
8012         fw_health->master_func_wait_dsecs =
8013                 le32_to_cpu(resp->master_func_wait_period);
8014         fw_health->normal_func_wait_dsecs =
8015                 le32_to_cpu(resp->normal_func_wait_period);
8016         fw_health->post_reset_wait_dsecs =
8017                 le32_to_cpu(resp->master_func_wait_period_after_reset);
8018         fw_health->post_reset_max_wait_dsecs =
8019                 le32_to_cpu(resp->max_bailout_time_after_reset);
8020         fw_health->regs[BNXT_FW_HEALTH_REG] =
8021                 le32_to_cpu(resp->fw_health_status_reg);
8022         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
8023                 le32_to_cpu(resp->fw_heartbeat_reg);
8024         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
8025                 le32_to_cpu(resp->fw_reset_cnt_reg);
8026         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
8027                 le32_to_cpu(resp->reset_inprogress_reg);
8028         fw_health->fw_reset_inprog_reg_mask =
8029                 le32_to_cpu(resp->reset_inprogress_reg_mask);
8030         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
8031         if (fw_health->fw_reset_seq_cnt >= 16) {
8032                 rc = -EINVAL;
8033                 goto err_recovery_out;
8034         }
8035         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
8036                 fw_health->fw_reset_seq_regs[i] =
8037                         le32_to_cpu(resp->reset_reg[i]);
8038                 fw_health->fw_reset_seq_vals[i] =
8039                         le32_to_cpu(resp->reset_reg_val[i]);
8040                 fw_health->fw_reset_seq_delay_msec[i] =
8041                         resp->delay_after_reset[i];
8042         }
8043 err_recovery_out:
8044         hwrm_req_drop(bp, req);
8045         if (!rc)
8046                 rc = bnxt_map_fw_health_regs(bp);
8047         if (rc)
8048                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
8049         return rc;
8050 }
8051
8052 static int bnxt_hwrm_func_reset(struct bnxt *bp)
8053 {
8054         struct hwrm_func_reset_input *req;
8055         int rc;
8056
8057         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
8058         if (rc)
8059                 return rc;
8060
8061         req->enables = 0;
8062         hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
8063         return hwrm_req_send(bp, req);
8064 }
8065
8066 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
8067 {
8068         struct hwrm_nvm_get_dev_info_output nvm_info;
8069
8070         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
8071                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
8072                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
8073                          nvm_info.nvm_cfg_ver_upd);
8074 }
8075
8076 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
8077 {
8078         struct hwrm_queue_qportcfg_output *resp;
8079         struct hwrm_queue_qportcfg_input *req;
8080         u8 i, j, *qptr;
8081         bool no_rdma;
8082         int rc = 0;
8083
8084         rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
8085         if (rc)
8086                 return rc;
8087
8088         resp = hwrm_req_hold(bp, req);
8089         rc = hwrm_req_send(bp, req);
8090         if (rc)
8091                 goto qportcfg_exit;
8092
8093         if (!resp->max_configurable_queues) {
8094                 rc = -EINVAL;
8095                 goto qportcfg_exit;
8096         }
8097         bp->max_tc = resp->max_configurable_queues;
8098         bp->max_lltc = resp->max_configurable_lossless_queues;
8099         if (bp->max_tc > BNXT_MAX_QUEUE)
8100                 bp->max_tc = BNXT_MAX_QUEUE;
8101
8102         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
8103         qptr = &resp->queue_id0;
8104         for (i = 0, j = 0; i < bp->max_tc; i++) {
8105                 bp->q_info[j].queue_id = *qptr;
8106                 bp->q_ids[i] = *qptr++;
8107                 bp->q_info[j].queue_profile = *qptr++;
8108                 bp->tc_to_qidx[j] = j;
8109                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
8110                     (no_rdma && BNXT_PF(bp)))
8111                         j++;
8112         }
8113         bp->max_q = bp->max_tc;
8114         bp->max_tc = max_t(u8, j, 1);
8115
8116         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
8117                 bp->max_tc = 1;
8118
8119         if (bp->max_lltc > bp->max_tc)
8120                 bp->max_lltc = bp->max_tc;
8121
8122 qportcfg_exit:
8123         hwrm_req_drop(bp, req);
8124         return rc;
8125 }
8126
8127 static int bnxt_hwrm_poll(struct bnxt *bp)
8128 {
8129         struct hwrm_ver_get_input *req;
8130         int rc;
8131
8132         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8133         if (rc)
8134                 return rc;
8135
8136         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8137         req->hwrm_intf_min = HWRM_VERSION_MINOR;
8138         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8139
8140         hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
8141         rc = hwrm_req_send(bp, req);
8142         return rc;
8143 }
8144
8145 static int bnxt_hwrm_ver_get(struct bnxt *bp)
8146 {
8147         struct hwrm_ver_get_output *resp;
8148         struct hwrm_ver_get_input *req;
8149         u16 fw_maj, fw_min, fw_bld, fw_rsv;
8150         u32 dev_caps_cfg, hwrm_ver;
8151         int rc, len;
8152
8153         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8154         if (rc)
8155                 return rc;
8156
8157         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
8158         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
8159         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8160         req->hwrm_intf_min = HWRM_VERSION_MINOR;
8161         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8162
8163         resp = hwrm_req_hold(bp, req);
8164         rc = hwrm_req_send(bp, req);
8165         if (rc)
8166                 goto hwrm_ver_get_exit;
8167
8168         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8169
8170         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8171                              resp->hwrm_intf_min_8b << 8 |
8172                              resp->hwrm_intf_upd_8b;
8173         if (resp->hwrm_intf_maj_8b < 1) {
8174                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
8175                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8176                             resp->hwrm_intf_upd_8b);
8177                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8178         }
8179
8180         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8181                         HWRM_VERSION_UPDATE;
8182
8183         if (bp->hwrm_spec_code > hwrm_ver)
8184                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8185                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8186                          HWRM_VERSION_UPDATE);
8187         else
8188                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8189                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8190                          resp->hwrm_intf_upd_8b);
8191
8192         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8193         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8194                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8195                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8196                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8197                 len = FW_VER_STR_LEN;
8198         } else {
8199                 fw_maj = resp->hwrm_fw_maj_8b;
8200                 fw_min = resp->hwrm_fw_min_8b;
8201                 fw_bld = resp->hwrm_fw_bld_8b;
8202                 fw_rsv = resp->hwrm_fw_rsvd_8b;
8203                 len = BC_HWRM_STR_LEN;
8204         }
8205         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8206         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8207                  fw_rsv);
8208
8209         if (strlen(resp->active_pkg_name)) {
8210                 int fw_ver_len = strlen(bp->fw_ver_str);
8211
8212                 snprintf(bp->fw_ver_str + fw_ver_len,
8213                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8214                          resp->active_pkg_name);
8215                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8216         }
8217
8218         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8219         if (!bp->hwrm_cmd_timeout)
8220                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8221         bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8222         if (!bp->hwrm_cmd_max_timeout)
8223                 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8224         else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8225                 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8226                             bp->hwrm_cmd_max_timeout / 1000);
8227
8228         if (resp->hwrm_intf_maj_8b >= 1) {
8229                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8230                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8231         }
8232         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8233                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8234
8235         bp->chip_num = le16_to_cpu(resp->chip_num);
8236         bp->chip_rev = resp->chip_rev;
8237         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8238             !resp->chip_metal)
8239                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8240
8241         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8242         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8243             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8244                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8245
8246         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8247                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8248
8249         if (dev_caps_cfg &
8250             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8251                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8252
8253         if (dev_caps_cfg &
8254             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8255                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8256
8257         if (dev_caps_cfg &
8258             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8259                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8260
8261 hwrm_ver_get_exit:
8262         hwrm_req_drop(bp, req);
8263         return rc;
8264 }
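/* Worked example (illustrative): hwrm_spec_code packs the interface version
 * as maj << 16 | min << 8 | upd, so spec 1.8.3 encodes as:
 *
 *	u32 spec = 1 << 16 | 8 << 8 | 3;	// == 0x10803
 *
 * which is exactly the constant compared against above: the 16-bit firmware
 * version fields are only trusted when the spec is newer than 1.8.3.
 * BNXT_FW_VER_CODE() similarly packs the four 16-bit firmware components
 * into one u64 so whole versions compare with a single integer test.
 */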
8265
8266 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8267 {
8268         struct hwrm_fw_set_time_input *req;
8269         struct tm tm;
8270         time64_t now = ktime_get_real_seconds();
8271         int rc;
8272
8273         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8274             bp->hwrm_spec_code < 0x10400)
8275                 return -EOPNOTSUPP;
8276
8277         time64_to_tm(now, 0, &tm);
8278         rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8279         if (rc)
8280                 return rc;
8281
8282         req->year = cpu_to_le16(1900 + tm.tm_year);
8283         req->month = 1 + tm.tm_mon;
8284         req->day = tm.tm_mday;
8285         req->hour = tm.tm_hour;
8286         req->minute = tm.tm_min;
8287         req->second = tm.tm_sec;
8288         return hwrm_req_send(bp, req);
8289 }
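/* Note on the +1900/+1 adjustments above: struct tm counts years from 1900
 * and months from 0, while the FW_SET_TIME request wants calendar values.
 * For example, at the epoch (now == 0):
 *
 *	time64_to_tm(0, 0, &tm);	// tm.tm_year == 70, tm.tm_mon == 0
 *	year  = 1900 + tm.tm_year;	// 1970
 *	month = 1 + tm.tm_mon;		// 1 (January)
 */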
8290
8291 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8292 {
8293         u64 sw_tmp;
8294
8295         hw &= mask;
8296         sw_tmp = (*sw & ~mask) | hw;
8297         if (hw < (*sw & mask))
8298                 sw_tmp += mask + 1;
8299         WRITE_ONCE(*sw, sw_tmp);
8300 }
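/* Worked example for bnxt_add_one_ctr(): the hardware exposes a counter
 * narrower than 64 bits (width given by @mask) and the driver carries the
 * overflow in software.  With a 16-bit mask for readability:
 *
 *	mask = 0xffff;
 *	*sw  = 0x2fffe;			// two wraps so far, low part 0xfffe
 *	hw   = 0x0003;			// hardware wrapped past 0xffff
 *
 *	sw_tmp = 0x20000 | 0x3;		// splice in new low bits: 0x20003
 *	hw < (*sw & mask)		// 0x3 < 0xfffe: a wrap happened
 *	sw_tmp += mask + 1;		// carry the wrap: 0x30003
 *
 * i.e. the counter advanced by 5 across the wrap, as expected.  WRITE_ONCE()
 * publishes the result so lockless readers never see a torn update.
 */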
8301
8302 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8303                                     int count, bool ignore_zero)
8304 {
8305         int i;
8306
8307         for (i = 0; i < count; i++) {
8308                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8309
8310                 if (ignore_zero && !hw)
8311                         continue;
8312
8313                 if (masks[i] == -1ULL)
8314                         sw_stats[i] = hw;
8315                 else
8316                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8317         }
8318 }
8319
8320 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8321 {
8322         if (!stats->hw_stats)
8323                 return;
8324
8325         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8326                                 stats->hw_masks, stats->len / 8, false);
8327 }
8328
8329 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8330 {
8331         struct bnxt_stats_mem *ring0_stats;
8332         bool ignore_zero = false;
8333         int i;
8334
8335         /* Chip bug.  Counter intermittently becomes 0. */
8336         if (bp->flags & BNXT_FLAG_CHIP_P5)
8337                 ignore_zero = true;
8338
8339         for (i = 0; i < bp->cp_nr_rings; i++) {
8340                 struct bnxt_napi *bnapi = bp->bnapi[i];
8341                 struct bnxt_cp_ring_info *cpr;
8342                 struct bnxt_stats_mem *stats;
8343
8344                 cpr = &bnapi->cp_ring;
8345                 stats = &cpr->stats;
8346                 if (!i)
8347                         ring0_stats = stats;
8348                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8349                                         ring0_stats->hw_masks,
8350                                         ring0_stats->len / 8, ignore_zero);
8351         }
8352         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8353                 struct bnxt_stats_mem *stats = &bp->port_stats;
8354                 __le64 *hw_stats = stats->hw_stats;
8355                 u64 *sw_stats = stats->sw_stats;
8356                 u64 *masks = stats->hw_masks;
8357                 int cnt;
8358
8359                 cnt = sizeof(struct rx_port_stats) / 8;
8360                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8361
8362                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8363                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8364                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8365                 cnt = sizeof(struct tx_port_stats) / 8;
8366                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8367         }
8368         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8369                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8370                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8371         }
8372 }
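/* Layout note (illustrative): the port-stats DMA block holds rx_port_stats
 * followed by tx_port_stats at BNXT_TX_PORT_STATS_BYTE_OFFSET, and every
 * counter is a __le64 -- hence the "/ 8" conversions from byte sizes to
 * counter counts and the matching pointer bumps above:
 *
 *	hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;	// skip the rx block
 *	cnt = sizeof(struct tx_port_stats) / 8;		// u64s in the tx block
 */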
8373
8374 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8375 {
8376         struct hwrm_port_qstats_input *req;
8377         struct bnxt_pf_info *pf = &bp->pf;
8378         int rc;
8379
8380         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8381                 return 0;
8382
8383         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8384                 return -EOPNOTSUPP;
8385
8386         rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8387         if (rc)
8388                 return rc;
8389
8390         req->flags = flags;
8391         req->port_id = cpu_to_le16(pf->port_id);
8392         req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8393                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
8394         req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8395         return hwrm_req_send(bp, req);
8396 }
8397
8398 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8399 {
8400         struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8401         struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8402         struct hwrm_port_qstats_ext_output *resp_qs;
8403         struct hwrm_port_qstats_ext_input *req_qs;
8404         struct bnxt_pf_info *pf = &bp->pf;
8405         u32 tx_stat_size;
8406         int rc;
8407
8408         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8409                 return 0;
8410
8411         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8412                 return -EOPNOTSUPP;
8413
8414         rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8415         if (rc)
8416                 return rc;
8417
8418         req_qs->flags = flags;
8419         req_qs->port_id = cpu_to_le16(pf->port_id);
8420         req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8421         req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8422         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8423                        sizeof(struct tx_port_stats_ext) : 0;
8424         req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8425         req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8426         resp_qs = hwrm_req_hold(bp, req_qs);
8427         rc = hwrm_req_send(bp, req_qs);
8428         if (!rc) {
8429                 bp->fw_rx_stats_ext_size =
8430                         le16_to_cpu(resp_qs->rx_stat_size) / 8;
8431                 if (BNXT_FW_MAJ(bp) < 220 &&
8432                     bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8433                         bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8434
8435                 bp->fw_tx_stats_ext_size = tx_stat_size ?
8436                         le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8437         } else {
8438                 bp->fw_rx_stats_ext_size = 0;
8439                 bp->fw_tx_stats_ext_size = 0;
8440         }
8441         hwrm_req_drop(bp, req_qs);
8442
8443         if (flags)
8444                 return rc;
8445
8446         if (bp->fw_tx_stats_ext_size <=
8447             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8448                 bp->pri2cos_valid = 0;
8449                 return rc;
8450         }
8451
8452         rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8453         if (rc)
8454                 return rc;
8455
8456         req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8457
8458         resp_qc = hwrm_req_hold(bp, req_qc);
8459         rc = hwrm_req_send(bp, req_qc);
8460         if (!rc) {
8461                 u8 *pri2cos;
8462                 int i, j;
8463
8464                 pri2cos = &resp_qc->pri0_cos_queue_id;
8465                 for (i = 0; i < 8; i++) {
8466                         u8 queue_id = pri2cos[i];
8467                         u8 queue_idx;
8468
8469                         /* Per port queue IDs start from 0, 10, 20, etc */
8470                         queue_idx = queue_id % 10;
8471                         if (queue_idx > BNXT_MAX_QUEUE) {
8472                                 bp->pri2cos_valid = false;
8473                                 hwrm_req_drop(bp, req_qc);
8474                                 return rc;
8475                         }
8476                         for (j = 0; j < bp->max_q; j++) {
8477                                 if (bp->q_ids[j] == queue_id)
8478                                         bp->pri2cos_idx[i] = queue_idx;
8479                         }
8480                 }
8481                 bp->pri2cos_valid = true;
8482         }
8483         hwrm_req_drop(bp, req_qc);
8484
8485         return rc;
8486 }
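/* Illustration of the pri2cos decode above: firmware numbers the per-port
 * queue IDs from a base of 0, 10, 20, ... per port, so the index within a
 * port is simply "queue_id % 10".  For example:
 *
 *	queue_id  = 23;		// third port's queue block
 *	queue_idx = 23 % 10;	// 3, the port's fourth CoS queue
 *
 * Any index beyond BNXT_MAX_QUEUE marks the whole table invalid rather than
 * risking an out-of-range pri2cos_idx[] entry.
 */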
8487
8488 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8489 {
8490         bnxt_hwrm_tunnel_dst_port_free(bp,
8491                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8492         bnxt_hwrm_tunnel_dst_port_free(bp,
8493                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8494 }
8495
8496 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8497 {
8498         int rc, i;
8499         u32 tpa_flags = 0;
8500
8501         if (set_tpa)
8502                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8503         else if (BNXT_NO_FW_ACCESS(bp))
8504                 return 0;
8505         for (i = 0; i < bp->nr_vnics; i++) {
8506                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8507                 if (rc) {
8508                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
8509                                    i, rc);
8510                         return rc;
8511                 }
8512         }
8513         return 0;
8514 }
8515
8516 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8517 {
8518         int i;
8519
8520         for (i = 0; i < bp->nr_vnics; i++)
8521                 bnxt_hwrm_vnic_set_rss(bp, i, false);
8522 }
8523
8524 static void bnxt_clear_vnic(struct bnxt *bp)
8525 {
8526         if (!bp->vnic_info)
8527                 return;
8528
8529         bnxt_hwrm_clear_vnic_filter(bp);
8530         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8531                 /* clear all RSS settings before freeing the vnic ctx */
8532                 bnxt_hwrm_clear_vnic_rss(bp);
8533                 bnxt_hwrm_vnic_ctx_free(bp);
8534         }
8535         /* before freeing the vnic, undo its TPA settings */
8536         if (bp->flags & BNXT_FLAG_TPA)
8537                 bnxt_set_tpa(bp, false);
8538         bnxt_hwrm_vnic_free(bp);
8539         if (bp->flags & BNXT_FLAG_CHIP_P5)
8540                 bnxt_hwrm_vnic_ctx_free(bp);
8541 }
8542
8543 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8544                                     bool irq_re_init)
8545 {
8546         bnxt_clear_vnic(bp);
8547         bnxt_hwrm_ring_free(bp, close_path);
8548         bnxt_hwrm_ring_grp_free(bp);
8549         if (irq_re_init) {
8550                 bnxt_hwrm_stat_ctx_free(bp);
8551                 bnxt_hwrm_free_tunnel_ports(bp);
8552         }
8553 }
8554
8555 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8556 {
8557         struct hwrm_func_cfg_input *req;
8558         u8 evb_mode;
8559         int rc;
8560
8561         if (br_mode == BRIDGE_MODE_VEB)
8562                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8563         else if (br_mode == BRIDGE_MODE_VEPA)
8564                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8565         else
8566                 return -EINVAL;
8567
8568         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8569         if (rc)
8570                 return rc;
8571
8572         req->fid = cpu_to_le16(0xffff);
8573         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8574         req->evb_mode = evb_mode;
8575         return hwrm_req_send(bp, req);
8576 }
8577
8578 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8579 {
8580         struct hwrm_func_cfg_input *req;
8581         int rc;
8582
8583         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8584                 return 0;
8585
8586         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8587         if (rc)
8588                 return rc;
8589
8590         req->fid = cpu_to_le16(0xffff);
8591         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8592         req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8593         if (size == 128)
8594                 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8595
8596         return hwrm_req_send(bp, req);
8597 }
8598
8599 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8600 {
8601         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8602         int rc;
8603
8604         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8605                 goto skip_rss_ctx;
8606
8607         /* allocate context for vnic */
8608         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8609         if (rc) {
8610                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8611                            vnic_id, rc);
8612                 goto vnic_setup_err;
8613         }
8614         bp->rsscos_nr_ctxs++;
8615
8616         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8617                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8618                 if (rc) {
8619                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8620                                    vnic_id, rc);
8621                         goto vnic_setup_err;
8622                 }
8623                 bp->rsscos_nr_ctxs++;
8624         }
8625
8626 skip_rss_ctx:
8627         /* configure default vnic, ring grp */
8628         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8629         if (rc) {
8630                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8631                            vnic_id, rc);
8632                 goto vnic_setup_err;
8633         }
8634
8635         /* Enable RSS hashing on vnic */
8636         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8637         if (rc) {
8638                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8639                            vnic_id, rc);
8640                 goto vnic_setup_err;
8641         }
8642
8643         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8644                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8645                 if (rc) {
8646                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8647                                    vnic_id, rc);
8648                 }
8649         }
8650
8651 vnic_setup_err:
8652         return rc;
8653 }
8654
8655 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8656 {
8657         int rc, i, nr_ctxs;
8658
8659         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8660         for (i = 0; i < nr_ctxs; i++) {
8661                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8662                 if (rc) {
8663                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8664                                    vnic_id, i, rc);
8665                         break;
8666                 }
8667                 bp->rsscos_nr_ctxs++;
8668         }
8669         if (i < nr_ctxs)
8670                 return -ENOMEM;
8671
8672         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8673         if (rc) {
8674                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8675                            vnic_id, rc);
8676                 return rc;
8677         }
8678         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8679         if (rc) {
8680                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8681                            vnic_id, rc);
8682                 return rc;
8683         }
8684         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8685                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8686                 if (rc) {
8687                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8688                                    vnic_id, rc);
8689                 }
8690         }
8691         return rc;
8692 }
8693
8694 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8695 {
8696         if (bp->flags & BNXT_FLAG_CHIP_P5)
8697                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8698         else
8699                 return __bnxt_setup_vnic(bp, vnic_id);
8700 }
8701
8702 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8703 {
8704 #ifdef CONFIG_RFS_ACCEL
8705         int i, rc = 0;
8706
8707         if (bp->flags & BNXT_FLAG_CHIP_P5)
8708                 return 0;
8709
8710         for (i = 0; i < bp->rx_nr_rings; i++) {
8711                 struct bnxt_vnic_info *vnic;
8712                 u16 vnic_id = i + 1;
8713                 u16 ring_id = i;
8714
8715                 if (vnic_id >= bp->nr_vnics)
8716                         break;
8717
8718                 vnic = &bp->vnic_info[vnic_id];
8719                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8720                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8721                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8722                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8723                 if (rc) {
8724                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8725                                    vnic_id, rc);
8726                         break;
8727                 }
8728                 rc = bnxt_setup_vnic(bp, vnic_id);
8729                 if (rc)
8730                         break;
8731         }
8732         return rc;
8733 #else
8734         return 0;
8735 #endif
8736 }
8737
8738 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8739 static bool bnxt_promisc_ok(struct bnxt *bp)
8740 {
8741 #ifdef CONFIG_BNXT_SRIOV
8742         if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8743                 return false;
8744 #endif
8745         return true;
8746 }
8747
8748 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8749 {
8750         int rc = 0;
8751
8752         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8753         if (rc) {
8754                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8755                            rc);
8756                 return rc;
8757         }
8758
8759         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8760         if (rc) {
8761                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8762                            rc);
8763                 return rc;
8764         }
8765         return rc;
8766 }
8767
8768 static int bnxt_cfg_rx_mode(struct bnxt *);
8769 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8770
8771 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8772 {
8773         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8774         int rc = 0;
8775         unsigned int rx_nr_rings = bp->rx_nr_rings;
8776
8777         if (irq_re_init) {
8778                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8779                 if (rc) {
8780                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8781                                    rc);
8782                         goto err_out;
8783                 }
8784         }
8785
8786         rc = bnxt_hwrm_ring_alloc(bp);
8787         if (rc) {
8788                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8789                 goto err_out;
8790         }
8791
8792         rc = bnxt_hwrm_ring_grp_alloc(bp);
8793         if (rc) {
8794                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8795                 goto err_out;
8796         }
8797
8798         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8799                 rx_nr_rings--;
8800
8801         /* default vnic 0 */
8802         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8803         if (rc) {
8804                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8805                 goto err_out;
8806         }
8807
8808         rc = bnxt_setup_vnic(bp, 0);
8809         if (rc)
8810                 goto err_out;
8811
8812         if (bp->flags & BNXT_FLAG_RFS) {
8813                 rc = bnxt_alloc_rfs_vnics(bp);
8814                 if (rc)
8815                         goto err_out;
8816         }
8817
8818         if (bp->flags & BNXT_FLAG_TPA) {
8819                 rc = bnxt_set_tpa(bp, true);
8820                 if (rc)
8821                         goto err_out;
8822         }
8823
8824         if (BNXT_VF(bp))
8825                 bnxt_update_vf_mac(bp);
8826
8827         /* Filter for default vnic 0 */
8828         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8829         if (rc) {
8830                 if (BNXT_VF(bp) && rc == -ENODEV)
8831                         netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8832                 else
8833                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8834                 goto err_out;
8835         }
8836         vnic->uc_filter_count = 1;
8837
8838         vnic->rx_mask = 0;
8839         if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8840                 goto skip_rx_mask;
8841
8842         if (bp->dev->flags & IFF_BROADCAST)
8843                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8844
8845         if (bp->dev->flags & IFF_PROMISC)
8846                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8847
8848         if (bp->dev->flags & IFF_ALLMULTI) {
8849                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8850                 vnic->mc_list_count = 0;
8851         } else if (bp->dev->flags & IFF_MULTICAST) {
8852                 u32 mask = 0;
8853
8854                 bnxt_mc_list_updated(bp, &mask);
8855                 vnic->rx_mask |= mask;
8856         }
8857
8858         rc = bnxt_cfg_rx_mode(bp);
8859         if (rc)
8860                 goto err_out;
8861
8862 skip_rx_mask:
8863         rc = bnxt_hwrm_set_coal(bp);
8864         if (rc)
8865                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8866                                 rc);
8867
8868         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8869                 rc = bnxt_setup_nitroa0_vnic(bp);
8870                 if (rc)
8871                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8872                                    rc);
8873         }
8874
8875         if (BNXT_VF(bp)) {
8876                 bnxt_hwrm_func_qcfg(bp);
8877                 netdev_update_features(bp->dev);
8878         }
8879
8880         return 0;
8881
8882 err_out:
8883         bnxt_hwrm_resource_free(bp, 0, true);
8884
8885         return rc;
8886 }
8887
8888 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8889 {
8890         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8891         return 0;
8892 }
8893
8894 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8895 {
8896         bnxt_init_cp_rings(bp);
8897         bnxt_init_rx_rings(bp);
8898         bnxt_init_tx_rings(bp);
8899         bnxt_init_ring_grps(bp, irq_re_init);
8900         bnxt_init_vnics(bp);
8901
8902         return bnxt_init_chip(bp, irq_re_init);
8903 }
8904
8905 static int bnxt_set_real_num_queues(struct bnxt *bp)
8906 {
8907         int rc;
8908         struct net_device *dev = bp->dev;
8909
8910         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8911                                           bp->tx_nr_rings_xdp);
8912         if (rc)
8913                 return rc;
8914
8915         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8916         if (rc)
8917                 return rc;
8918
8919 #ifdef CONFIG_RFS_ACCEL
8920         if (bp->flags & BNXT_FLAG_RFS)
8921                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8922 #endif
8923
8924         return rc;
8925 }
8926
8927 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8928                            bool shared)
8929 {
8930         int _rx = *rx, _tx = *tx;
8931
8932         if (shared) {
8933                 *rx = min_t(int, _rx, max);
8934                 *tx = min_t(int, _tx, max);
8935         } else {
8936                 if (max < 2)
8937                         return -ENOMEM;
8938
8939                 while (_rx + _tx > max) {
8940                         if (_rx > _tx && _rx > 1)
8941                                 _rx--;
8942                         else if (_tx > 1)
8943                                 _tx--;
8944                 }
8945                 *rx = _rx;
8946                 *tx = _tx;
8947         }
8948         return 0;
8949 }
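/* Worked example for bnxt_trim_rings(): with dedicated rings the loop sheds
 * one ring at a time from whichever side is larger until the sum fits.
 * E.g. *rx = 8, *tx = 4, max = 10, shared = false:
 *
 *	8 + 4 > 10  ->  _rx--	// rx is larger: rx = 7
 *	7 + 4 > 10  ->  _rx--	// rx = 6
 *	6 + 4 == 10 ->  done: *rx = 6, *tx = 4
 *
 * With shared completion rings each side is clamped to max independently,
 * since an rx/tx pair consumes a single ring.
 */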
8950
8951 static void bnxt_setup_msix(struct bnxt *bp)
8952 {
8953         const int len = sizeof(bp->irq_tbl[0].name);
8954         struct net_device *dev = bp->dev;
8955         int tcs, i;
8956
8957         tcs = netdev_get_num_tc(dev);
8958         if (tcs) {
8959                 int i, off, count;
8960
8961                 for (i = 0; i < tcs; i++) {
8962                         count = bp->tx_nr_rings_per_tc;
8963                         off = i * count;
8964                         netdev_set_tc_queue(dev, i, count, off);
8965                 }
8966         }
8967
8968         for (i = 0; i < bp->cp_nr_rings; i++) {
8969                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8970                 char *attr;
8971
8972                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8973                         attr = "TxRx";
8974                 else if (i < bp->rx_nr_rings)
8975                         attr = "rx";
8976                 else
8977                         attr = "tx";
8978
8979                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8980                          attr, i);
8981                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8982         }
8983 }
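/* Note: with the naming above, /proc/interrupts ends up with one entry per
 * completion ring named "<netdev>-<attr>-<idx>".  Assuming a device named
 * eth0 with shared rings (an assumed example, not captured output):
 *
 *	eth0-TxRx-0
 *	eth0-TxRx-1
 *	...
 *
 * with "rx"/"tx" in place of "TxRx" when rings are dedicated.
 */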
8984
8985 static void bnxt_setup_inta(struct bnxt *bp)
8986 {
8987         const int len = sizeof(bp->irq_tbl[0].name);
8988
8989         if (netdev_get_num_tc(bp->dev))
8990                 netdev_reset_tc(bp->dev);
8991
8992         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8993                  0);
8994         bp->irq_tbl[0].handler = bnxt_inta;
8995 }
8996
8997 static int bnxt_init_int_mode(struct bnxt *bp);
8998
8999 static int bnxt_setup_int_mode(struct bnxt *bp)
9000 {
9001         int rc;
9002
9003         if (!bp->irq_tbl) {
9004                 rc = bnxt_init_int_mode(bp);
9005                 if (rc || !bp->irq_tbl)
9006                         return rc ?: -ENODEV;
9007         }
9008
9009         if (bp->flags & BNXT_FLAG_USING_MSIX)
9010                 bnxt_setup_msix(bp);
9011         else
9012                 bnxt_setup_inta(bp);
9013
9014         rc = bnxt_set_real_num_queues(bp);
9015         return rc;
9016 }
9017
9018 #ifdef CONFIG_RFS_ACCEL
9019 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
9020 {
9021         return bp->hw_resc.max_rsscos_ctxs;
9022 }
9023
9024 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
9025 {
9026         return bp->hw_resc.max_vnics;
9027 }
9028 #endif
9029
9030 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
9031 {
9032         return bp->hw_resc.max_stat_ctxs;
9033 }
9034
9035 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
9036 {
9037         return bp->hw_resc.max_cp_rings;
9038 }
9039
9040 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
9041 {
9042         unsigned int cp = bp->hw_resc.max_cp_rings;
9043
9044         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9045                 cp -= bnxt_get_ulp_msix_num(bp);
9046
9047         return cp;
9048 }
9049
9050 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
9051 {
9052         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9053
9054         if (bp->flags & BNXT_FLAG_CHIP_P5)
9055                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
9056
9057         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
9058 }
9059
9060 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
9061 {
9062         bp->hw_resc.max_irqs = max_irqs;
9063 }
9064
9065 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
9066 {
9067         unsigned int cp;
9068
9069         cp = bnxt_get_max_func_cp_rings_for_en(bp);
9070         if (bp->flags & BNXT_FLAG_CHIP_P5)
9071                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
9072         else
9073                 return cp - bp->cp_nr_rings;
9074 }
9075
9076 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
9077 {
9078         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
9079 }
9080
9081 int bnxt_get_avail_msix(struct bnxt *bp, int num)
9082 {
9083         int max_cp = bnxt_get_max_func_cp_rings(bp);
9084         int max_irq = bnxt_get_max_func_irqs(bp);
9085         int total_req = bp->cp_nr_rings + num;
9086         int max_idx, avail_msix;
9087
9088         max_idx = bp->total_irqs;
9089         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9090                 max_idx = min_t(int, bp->total_irqs, max_cp);
9091         avail_msix = max_idx - bp->cp_nr_rings;
9092         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
9093                 return avail_msix;
9094
9095         if (max_irq < total_req) {
9096                 num = max_irq - bp->cp_nr_rings;
9097                 if (num <= 0)
9098                         return 0;
9099         }
9100         return num;
9101 }
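/* Worked example for bnxt_get_avail_msix() (assumed numbers): on a P5 chip
 * with bp->total_irqs = 16, bp->cp_nr_rings = 12 and a ULP asking for
 * num = 8:
 *
 *	avail_msix = 16 - 12;	// 4 spare vectors
 *
 * Without the new resource manager (or when 4 >= num) the caller simply
 * gets those spare vectors.  With BNXT_NEW_RM() more vectors can be
 * reserved from firmware, so the request is only clamped by the function's
 * absolute IRQ budget, max_irq - cp_nr_rings.
 */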
9102
9103 static int bnxt_get_num_msix(struct bnxt *bp)
9104 {
9105         if (!BNXT_NEW_RM(bp))
9106                 return bnxt_get_max_func_irqs(bp);
9107
9108         return bnxt_nq_rings_in_use(bp);
9109 }
9110
9111 static int bnxt_init_msix(struct bnxt *bp)
9112 {
9113         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
9114         struct msix_entry *msix_ent;
9115
9116         total_vecs = bnxt_get_num_msix(bp);
9117         max = bnxt_get_max_func_irqs(bp);
9118         if (total_vecs > max)
9119                 total_vecs = max;
9120
9121         if (!total_vecs)
9122                 return 0;
9123
9124         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
9125         if (!msix_ent)
9126                 return -ENOMEM;
9127
9128         for (i = 0; i < total_vecs; i++) {
9129                 msix_ent[i].entry = i;
9130                 msix_ent[i].vector = 0;
9131         }
9132
9133         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
9134                 min = 2;
9135
9136         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
9137         ulp_msix = bnxt_get_ulp_msix_num(bp);
9138         if (total_vecs < 0 || total_vecs < ulp_msix) {
9139                 rc = -ENODEV;
9140                 goto msix_setup_exit;
9141         }
9142
9143         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9144         if (bp->irq_tbl) {
9145                 for (i = 0; i < total_vecs; i++)
9146                         bp->irq_tbl[i].vector = msix_ent[i].vector;
9147
9148                 bp->total_irqs = total_vecs;
9149                 /* Trim rings based on the number of vectors allocated */
9150                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
9151                                      total_vecs - ulp_msix, min == 1);
9152                 if (rc)
9153                         goto msix_setup_exit;
9154
9155                 bp->cp_nr_rings = (min == 1) ?
9156                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9157                                   bp->tx_nr_rings + bp->rx_nr_rings;
9158
9159         } else {
9160                 rc = -ENOMEM;
9161                 goto msix_setup_exit;
9162         }
9163         bp->flags |= BNXT_FLAG_USING_MSIX;
9164         kfree(msix_ent);
9165         return 0;
9166
9167 msix_setup_exit:
9168         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9169         kfree(bp->irq_tbl);
9170         bp->irq_tbl = NULL;
9171         pci_disable_msix(bp->pdev);
9172         kfree(msix_ent);
9173         return rc;
9174 }
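/* Note on the allocation above: pci_enable_msix_range() either enables
 * between @min and @max vectors and returns the count it actually got, or
 * returns a negative errno -- which is why its result both replaces
 * total_vecs and feeds the ring-trimming step.  A hedged sketch of the
 * contract:
 *
 *	int got = pci_enable_msix_range(pdev, ent, min, want);
 *	if (got < 0)
 *		return got;	// not even @min vectors were available
 *	// got is in [min, want]; size the rings to what was granted
 *
 * min == 2 when rx and tx rings are dedicated, since each side then needs
 * its own completion vector.
 */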
9175
9176 static int bnxt_init_inta(struct bnxt *bp)
9177 {
9178         bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9179         if (!bp->irq_tbl)
9180                 return -ENOMEM;
9181
9182         bp->total_irqs = 1;
9183         bp->rx_nr_rings = 1;
9184         bp->tx_nr_rings = 1;
9185         bp->cp_nr_rings = 1;
9186         bp->flags |= BNXT_FLAG_SHARED_RINGS;
9187         bp->irq_tbl[0].vector = bp->pdev->irq;
9188         return 0;
9189 }
9190
9191 static int bnxt_init_int_mode(struct bnxt *bp)
9192 {
9193         int rc = -ENODEV;
9194
9195         if (bp->flags & BNXT_FLAG_MSIX_CAP)
9196                 rc = bnxt_init_msix(bp);
9197
9198         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
9199                 /* fall back to INTA */
9200                 rc = bnxt_init_inta(bp);
9201         }
9202         return rc;
9203 }
9204
9205 static void bnxt_clear_int_mode(struct bnxt *bp)
9206 {
9207         if (bp->flags & BNXT_FLAG_USING_MSIX)
9208                 pci_disable_msix(bp->pdev);
9209
9210         kfree(bp->irq_tbl);
9211         bp->irq_tbl = NULL;
9212         bp->flags &= ~BNXT_FLAG_USING_MSIX;
9213 }
9214
9215 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
9216 {
9217         int tcs = netdev_get_num_tc(bp->dev);
9218         bool irq_cleared = false;
9219         int rc;
9220
9221         if (!bnxt_need_reserve_rings(bp))
9222                 return 0;
9223
9224         if (irq_re_init && BNXT_NEW_RM(bp) &&
9225             bnxt_get_num_msix(bp) != bp->total_irqs) {
9226                 bnxt_ulp_irq_stop(bp);
9227                 bnxt_clear_int_mode(bp);
9228                 irq_cleared = true;
9229         }
9230         rc = __bnxt_reserve_rings(bp);
9231         if (irq_cleared) {
9232                 if (!rc)
9233                         rc = bnxt_init_int_mode(bp);
9234                 bnxt_ulp_irq_restart(bp, rc);
9235         }
9236         if (rc) {
9237                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9238                 return rc;
9239         }
9240         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9241                 netdev_err(bp->dev, "tx ring reservation failure\n");
9242                 netdev_reset_tc(bp->dev);
9243                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9244                 return -ENOMEM;
9245         }
9246         return 0;
9247 }
9248
9249 static void bnxt_free_irq(struct bnxt *bp)
9250 {
9251         struct bnxt_irq *irq;
9252         int i;
9253
9254 #ifdef CONFIG_RFS_ACCEL
9255         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9256         bp->dev->rx_cpu_rmap = NULL;
9257 #endif
9258         if (!bp->irq_tbl || !bp->bnapi)
9259                 return;
9260
9261         for (i = 0; i < bp->cp_nr_rings; i++) {
9262                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9263
9264                 irq = &bp->irq_tbl[map_idx];
9265                 if (irq->requested) {
9266                         if (irq->have_cpumask) {
9267                                 irq_set_affinity_hint(irq->vector, NULL);
9268                                 free_cpumask_var(irq->cpu_mask);
9269                                 irq->have_cpumask = 0;
9270                         }
9271                         free_irq(irq->vector, bp->bnapi[i]);
9272                 }
9273
9274                 irq->requested = 0;
9275         }
9276 }
9277
9278 static int bnxt_request_irq(struct bnxt *bp)
9279 {
9280         int i, j, rc = 0;
9281         unsigned long flags = 0;
9282 #ifdef CONFIG_RFS_ACCEL
9283         struct cpu_rmap *rmap;
9284 #endif
9285
9286         rc = bnxt_setup_int_mode(bp);
9287         if (rc) {
9288                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9289                            rc);
9290                 return rc;
9291         }
9292 #ifdef CONFIG_RFS_ACCEL
9293         rmap = bp->dev->rx_cpu_rmap;
9294 #endif
9295         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9296                 flags = IRQF_SHARED;
9297
9298         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9299                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9300                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9301
9302 #ifdef CONFIG_RFS_ACCEL
9303                 if (rmap && bp->bnapi[i]->rx_ring) {
9304                         rc = irq_cpu_rmap_add(rmap, irq->vector);
9305                         if (rc)
9306                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9307                                             j);
9308                         j++;
9309                 }
9310 #endif
9311                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9312                                  bp->bnapi[i]);
9313                 if (rc)
9314                         break;
9315
9316                 irq->requested = 1;
9317
9318                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9319                         int numa_node = dev_to_node(&bp->pdev->dev);
9320
9321                         irq->have_cpumask = 1;
9322                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9323                                         irq->cpu_mask);
9324                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9325                         if (rc) {
9326                                 netdev_warn(bp->dev,
9327                                             "Set affinity failed, IRQ = %d\n",
9328                                             irq->vector);
9329                                 break;
9330                         }
9331                 }
9332         }
9333         return rc;
9334 }
9335
9336 static void bnxt_del_napi(struct bnxt *bp)
9337 {
9338         int i;
9339
9340         if (!bp->bnapi)
9341                 return;
9342
9343         for (i = 0; i < bp->cp_nr_rings; i++) {
9344                 struct bnxt_napi *bnapi = bp->bnapi[i];
9345
9346                 __netif_napi_del(&bnapi->napi);
9347         }
9348         /* Since we called __netif_napi_del(), we must respect an RCU
9349          * grace period before freeing the napi structures.
9350          */
9351         synchronize_net();
9352 }
9353
9354 static void bnxt_init_napi(struct bnxt *bp)
9355 {
9356         int i;
9357         unsigned int cp_nr_rings = bp->cp_nr_rings;
9358         struct bnxt_napi *bnapi;
9359
9360         if (bp->flags & BNXT_FLAG_USING_MSIX) {
9361                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9362
9363                 if (bp->flags & BNXT_FLAG_CHIP_P5)
9364                         poll_fn = bnxt_poll_p5;
9365                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9366                         cp_nr_rings--;
9367                 for (i = 0; i < cp_nr_rings; i++) {
9368                         bnapi = bp->bnapi[i];
9369                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
9370                 }
9371                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9372                         bnapi = bp->bnapi[cp_nr_rings];
9373                         netif_napi_add(bp->dev, &bnapi->napi,
9374                                        bnxt_poll_nitroa0);
9375                 }
9376         } else {
9377                 bnapi = bp->bnapi[0];
9378                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
9379         }
9380 }
9381
9382 static void bnxt_disable_napi(struct bnxt *bp)
9383 {
9384         int i;
9385
9386         if (!bp->bnapi ||
9387             test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9388                 return;
9389
9390         for (i = 0; i < bp->cp_nr_rings; i++) {
9391                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9392
9393                 napi_disable(&bp->bnapi[i]->napi);
9394                 if (bp->bnapi[i]->rx_ring)
9395                         cancel_work_sync(&cpr->dim.work);
9396         }
9397 }
9398
9399 static void bnxt_enable_napi(struct bnxt *bp)
9400 {
9401         int i;
9402
9403         clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9404         for (i = 0; i < bp->cp_nr_rings; i++) {
9405                 struct bnxt_napi *bnapi = bp->bnapi[i];
9406                 struct bnxt_cp_ring_info *cpr;
9407
9408                 cpr = &bnapi->cp_ring;
9409                 if (bnapi->in_reset)
9410                         cpr->sw_stats.rx.rx_resets++;
9411                 bnapi->in_reset = false;
9412
9413                 if (bnapi->rx_ring) {
9414                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9415                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9416                 }
9417                 napi_enable(&bnapi->napi);
9418         }
9419 }
9420
9421 void bnxt_tx_disable(struct bnxt *bp)
9422 {
9423         int i;
9424         struct bnxt_tx_ring_info *txr;
9425
9426         if (bp->tx_ring) {
9427                 for (i = 0; i < bp->tx_nr_rings; i++) {
9428                         txr = &bp->tx_ring[i];
9429                         WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9430                 }
9431         }
9432         /* Make sure napi polls see @dev_state change */
9433         synchronize_net();
9434         /* Drop carrier first to prevent TX timeout */
9435         netif_carrier_off(bp->dev);
9436         /* Stop all TX queues */
9437         netif_tx_disable(bp->dev);
9438 }
9439
9440 void bnxt_tx_enable(struct bnxt *bp)
9441 {
9442         int i;
9443         struct bnxt_tx_ring_info *txr;
9444
9445         for (i = 0; i < bp->tx_nr_rings; i++) {
9446                 txr = &bp->tx_ring[i];
9447                 WRITE_ONCE(txr->dev_state, 0);
9448         }
9449         /* Make sure napi polls see @dev_state change */
9450         synchronize_net();
9451         netif_tx_wake_all_queues(bp->dev);
9452         if (BNXT_LINK_IS_UP(bp))
9453                 netif_carrier_on(bp->dev);
9454 }
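/* Note on the ordering in bnxt_tx_disable()/bnxt_tx_enable(): dev_state is
 * written with WRITE_ONCE() and the xmit/poll paths are expected to load it
 * with a matching READ_ONCE(); synchronize_net() then guarantees every
 * in-flight NAPI poll has observed the new state before the queues are
 * stopped or woken.  A hedged sketch of the reader side (illustrative, not
 * the driver's exact code):
 *
 *	if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
 *		return;		// ring is quiescing, do not restart it
 */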
9455
9456 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9457 {
9458         u8 active_fec = link_info->active_fec_sig_mode &
9459                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9460
9461         switch (active_fec) {
9462         default:
9463         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9464                 return "None";
9465         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9466                 return "Clause 74 BaseR";
9467         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9468                 return "Clause 91 RS(528,514)";
9469         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9470                 return "Clause 91 RS544_1XN";
9471         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9472                 return "Clause 91 RS(544,514)";
9473         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9474                 return "Clause 91 RS272_1XN";
9475         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9476                 return "Clause 91 RS(272,257)";
9477         }
9478 }
9479
9480 void bnxt_report_link(struct bnxt *bp)
9481 {
9482         if (BNXT_LINK_IS_UP(bp)) {
9483                 const char *signal = "";
9484                 const char *flow_ctrl;
9485                 const char *duplex;
9486                 u32 speed;
9487                 u16 fec;
9488
9489                 netif_carrier_on(bp->dev);
9490                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9491                 if (speed == SPEED_UNKNOWN) {
9492                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9493                         return;
9494                 }
9495                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9496                         duplex = "full";
9497                 else
9498                         duplex = "half";
9499                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9500                         flow_ctrl = "ON - receive & transmit";
9501                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9502                         flow_ctrl = "ON - transmit";
9503                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9504                         flow_ctrl = "ON - receive";
9505                 else
9506                         flow_ctrl = "none";
9507                 if (bp->link_info.phy_qcfg_resp.option_flags &
9508                     PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9509                         u8 sig_mode = bp->link_info.active_fec_sig_mode &
9510                                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9511                         switch (sig_mode) {
9512                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9513                                 signal = "(NRZ) ";
9514                                 break;
9515                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9516                                 signal = "(PAM4) ";
9517                                 break;
9518                         default:
9519                                 break;
9520                         }
9521                 }
9522                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9523                             speed, signal, duplex, flow_ctrl);
9524                 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9525                         netdev_info(bp->dev, "EEE is %s\n",
9526                                     bp->eee.eee_active ? "active" :
9527                                                          "not active");
9528                 fec = bp->link_info.fec_cfg;
9529                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9530                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9531                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9532                                     bnxt_report_fec(&bp->link_info));
9533         } else {
9534                 netif_carrier_off(bp->dev);
9535                 netdev_err(bp->dev, "NIC Link is Down\n");
9536         }
9537 }
9538
9539 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9540 {
9541         if (!resp->supported_speeds_auto_mode &&
9542             !resp->supported_speeds_force_mode &&
9543             !resp->supported_pam4_speeds_auto_mode &&
9544             !resp->supported_pam4_speeds_force_mode)
9545                 return true;
9546         return false;
9547 }
9548
9549 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9550 {
9551         struct bnxt_link_info *link_info = &bp->link_info;
9552         struct hwrm_port_phy_qcaps_output *resp;
9553         struct hwrm_port_phy_qcaps_input *req;
9554         int rc = 0;
9555
9556         if (bp->hwrm_spec_code < 0x10201)
9557                 return 0;
9558
9559         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9560         if (rc)
9561                 return rc;
9562
9563         resp = hwrm_req_hold(bp, req);
9564         rc = hwrm_req_send(bp, req);
9565         if (rc)
9566                 goto hwrm_phy_qcaps_exit;
9567
9568         bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
9569         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9570                 struct ethtool_eee *eee = &bp->eee;
9571                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9572
9573                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9574                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9575                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9576                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9577                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9578         }
9579
9580         if (bp->hwrm_spec_code >= 0x10a01) {
9581                 if (bnxt_phy_qcaps_no_speed(resp)) {
9582                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9583                         netdev_warn(bp->dev, "Ethernet link disabled\n");
9584                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9585                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9586                         netdev_info(bp->dev, "Ethernet link enabled\n");
9587                         /* Phy re-enabled, reprobe the speeds */
9588                         link_info->support_auto_speeds = 0;
9589                         link_info->support_pam4_auto_speeds = 0;
9590                 }
9591         }
9592         if (resp->supported_speeds_auto_mode)
9593                 link_info->support_auto_speeds =
9594                         le16_to_cpu(resp->supported_speeds_auto_mode);
9595         if (resp->supported_pam4_speeds_auto_mode)
9596                 link_info->support_pam4_auto_speeds =
9597                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9598
9599         bp->port_count = resp->port_cnt;
9600
9601 hwrm_phy_qcaps_exit:
9602         hwrm_req_drop(bp, req);
9603         return rc;
9604 }
9605
9606 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9607 {
9608         u16 diff = advertising ^ supported;
9609
9610         return ((supported | diff) != supported);
9611 }
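/* Illustration of the bit trick above: the XOR isolates the bits where the
 * two masks disagree, and OR-ing those back into @supported only changes it
 * if @advertising carries a bit @supported lacks -- i.e. the function
 * returns true exactly when an advertised speed is no longer supported.
 * For example:
 *
 *	supported   = 0b1100;
 *	advertising = 0b0110;
 *	diff        = 0b1010;			// advertising ^ supported
 *	(supported | diff) = 0b1110 != 0b1100	// bit 1 was dropped
 */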
9612
9613 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9614 {
9615         struct bnxt_link_info *link_info = &bp->link_info;
9616         struct hwrm_port_phy_qcfg_output *resp;
9617         struct hwrm_port_phy_qcfg_input *req;
9618         u8 link_state = link_info->link_state;
9619         bool support_changed = false;
9620         int rc;
9621
9622         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9623         if (rc)
9624                 return rc;
9625
9626         resp = hwrm_req_hold(bp, req);
9627         rc = hwrm_req_send(bp, req);
9628         if (rc) {
9629                 hwrm_req_drop(bp, req);
9630                 if (BNXT_VF(bp) && rc == -ENODEV) {
9631                         netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9632                         rc = 0;
9633                 }
9634                 return rc;
9635         }
9636
9637         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9638         link_info->phy_link_status = resp->link;
9639         link_info->duplex = resp->duplex_cfg;
9640         if (bp->hwrm_spec_code >= 0x10800)
9641                 link_info->duplex = resp->duplex_state;
9642         link_info->pause = resp->pause;
9643         link_info->auto_mode = resp->auto_mode;
9644         link_info->auto_pause_setting = resp->auto_pause;
9645         link_info->lp_pause = resp->link_partner_adv_pause;
9646         link_info->force_pause_setting = resp->force_pause;
9647         link_info->duplex_setting = resp->duplex_cfg;
9648         if (link_info->phy_link_status == BNXT_LINK_LINK)
9649                 link_info->link_speed = le16_to_cpu(resp->link_speed);
9650         else
9651                 link_info->link_speed = 0;
9652         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9653         link_info->force_pam4_link_speed =
9654                 le16_to_cpu(resp->force_pam4_link_speed);
9655         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9656         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9657         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9658         link_info->auto_pam4_link_speeds =
9659                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9660         link_info->lp_auto_link_speeds =
9661                 le16_to_cpu(resp->link_partner_adv_speeds);
9662         link_info->lp_auto_pam4_link_speeds =
9663                 resp->link_partner_pam4_adv_speeds;
9664         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9665         link_info->phy_ver[0] = resp->phy_maj;
9666         link_info->phy_ver[1] = resp->phy_min;
9667         link_info->phy_ver[2] = resp->phy_bld;
9668         link_info->media_type = resp->media_type;
9669         link_info->phy_type = resp->phy_type;
9670         link_info->transceiver = resp->xcvr_pkg_type;
9671         link_info->phy_addr = resp->eee_config_phy_addr &
9672                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9673         link_info->module_status = resp->module_status;
9674
9675         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9676                 struct ethtool_eee *eee = &bp->eee;
9677                 u16 fw_speeds;
9678
9679                 eee->eee_active = 0;
9680                 if (resp->eee_config_phy_addr &
9681                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9682                         eee->eee_active = 1;
9683                         fw_speeds = le16_to_cpu(
9684                                 resp->link_partner_adv_eee_link_speed_mask);
9685                         eee->lp_advertised =
9686                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9687                 }
9688
9689                 /* Pull initial EEE config */
9690                 if (!chng_link_state) {
9691                         if (resp->eee_config_phy_addr &
9692                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9693                                 eee->eee_enabled = 1;
9694
9695                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9696                         eee->advertised =
9697                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9698
9699                         if (resp->eee_config_phy_addr &
9700                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9701                                 __le32 tmr;
9702
9703                                 eee->tx_lpi_enabled = 1;
9704                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9705                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9706                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9707                         }
9708                 }
9709         }
9710
9711         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9712         if (bp->hwrm_spec_code >= 0x10504) {
9713                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9714                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9715         }
9716         /* TODO: need to add more logic to report VF link */
9717         if (chng_link_state) {
9718                 if (link_info->phy_link_status == BNXT_LINK_LINK)
9719                         link_info->link_state = BNXT_LINK_STATE_UP;
9720                 else
9721                         link_info->link_state = BNXT_LINK_STATE_DOWN;
9722                 if (link_state != link_info->link_state)
9723                         bnxt_report_link(bp);
9724         } else {
9725                 /* always set link down if not asked to update the link state */
9726                 link_info->link_state = BNXT_LINK_STATE_DOWN;
9727         }
9728         hwrm_req_drop(bp, req);
9729
9730         if (!BNXT_PHY_CFG_ABLE(bp))
9731                 return 0;
9732
9733         /* Check if any advertised speeds are no longer supported. The caller
9734          * holds the link_lock mutex, so we can modify link_info settings.
9735          */
9736         if (bnxt_support_dropped(link_info->advertising,
9737                                  link_info->support_auto_speeds)) {
9738                 link_info->advertising = link_info->support_auto_speeds;
9739                 support_changed = true;
9740         }
9741         if (bnxt_support_dropped(link_info->advertising_pam4,
9742                                  link_info->support_pam4_auto_speeds)) {
9743                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9744                 support_changed = true;
9745         }
9746         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9747                 bnxt_hwrm_set_link_setting(bp, true, false);
9748         return 0;
9749 }
9750
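/* Refresh the link state and warn if the installed SFP+ module was
 * rejected by the firmware (TX disabled or module powered down).
 */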
9751 static void bnxt_get_port_module_status(struct bnxt *bp)
9752 {
9753         struct bnxt_link_info *link_info = &bp->link_info;
9754         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9755         u8 module_status;
9756
9757         if (bnxt_update_link(bp, true))
9758                 return;
9759
9760         module_status = link_info->module_status;
9761         switch (module_status) {
9762         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9763         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9764         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9765                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9766                             bp->pf.port_id);
9767                 if (bp->hwrm_spec_code >= 0x10201) {
9768                         netdev_warn(bp->dev, "Module part number %s\n",
9769                                     resp->phy_vendor_partnumber);
9770                 }
9771                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9772                         netdev_warn(bp->dev, "TX is disabled\n");
9773                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9774                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9775         }
9776 }
9777
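/* Fill in the pause fields of a PORT_PHY_CFG request.  With autonegotiated
 * flow control the requested RX/TX pause goes into auto_pause; otherwise it
 * goes into force_pause.  On spec 0x10201+ firmware the forced values are
 * mirrored into auto_pause as well, presumably so the same settings apply
 * if autoneg is later enabled.
 */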
9778 static void
9779 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9780 {
9781         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9782                 if (bp->hwrm_spec_code >= 0x10201)
9783                         req->auto_pause =
9784                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9785                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9786                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9787                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9788                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9789                 req->enables |=
9790                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9791         } else {
9792                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9793                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9794                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9795                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9796                 req->enables |=
9797                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9798                 if (bp->hwrm_spec_code >= 0x10201) {
9799                         req->auto_pause = req->force_pause;
9800                         req->enables |= cpu_to_le32(
9801                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9802                 }
9803         }
9804 }
9805
9806 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9807 {
9808         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9809                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9810                 if (bp->link_info.advertising) {
9811                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9812                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9813                 }
9814                 if (bp->link_info.advertising_pam4) {
9815                         req->enables |=
9816                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9817                         req->auto_link_pam4_speed_mask =
9818                                 cpu_to_le16(bp->link_info.advertising_pam4);
9819                 }
9820                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9821                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9822         } else {
9823                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9824                 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9825                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9826                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9827                 } else {
9828                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9829                 }
9830         }
9831
9832         /* tell ChiMP (the firmware processor) that the setting takes effect immediately */
9833         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9834 }
9835
9836 int bnxt_hwrm_set_pause(struct bnxt *bp)
9837 {
9838         struct hwrm_port_phy_cfg_input *req;
9839         int rc;
9840
9841         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9842         if (rc)
9843                 return rc;
9844
9845         bnxt_hwrm_set_pause_common(bp, req);
9846
9847         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9848             bp->link_info.force_link_chng)
9849                 bnxt_hwrm_set_link_common(bp, req);
9850
9851         rc = hwrm_req_send(bp, req);
9852         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9853                 /* Since changing the pause setting doesn't trigger any link
9854                  * change event, the driver needs to update the current pause
9855                  * result upon successful return of the phy_cfg command.
9856                  */
9857                 bp->link_info.pause =
9858                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9859                 bp->link_info.auto_pause_setting = 0;
9860                 if (!bp->link_info.force_link_chng)
9861                         bnxt_report_link(bp);
9862         }
9863         bp->link_info.force_link_chng = false;
9864         return rc;
9865 }
9866
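/* Translate the cached ethtool_eee settings into PORT_PHY_CFG flags:
 * EEE enable/disable, TX LPI enable/disable, the advertised EEE speed
 * mask, and the LPI timer.
 */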
9867 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9868                               struct hwrm_port_phy_cfg_input *req)
9869 {
9870         struct ethtool_eee *eee = &bp->eee;
9871
9872         if (eee->eee_enabled) {
9873                 u16 eee_speeds;
9874                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9875
9876                 if (eee->tx_lpi_enabled)
9877                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9878                 else
9879                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9880
9881                 req->flags |= cpu_to_le32(flags);
9882                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9883                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9884                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9885         } else {
9886                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9887         }
9888 }
9889
9890 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9891 {
9892         struct hwrm_port_phy_cfg_input *req;
9893         int rc;
9894
9895         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9896         if (rc)
9897                 return rc;
9898
9899         if (set_pause)
9900                 bnxt_hwrm_set_pause_common(bp, req);
9901
9902         bnxt_hwrm_set_link_common(bp, req);
9903
9904         if (set_eee)
9905                 bnxt_hwrm_set_eee(bp, req);
9906         return hwrm_req_send(bp, req);
9907 }
9908
9909 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9910 {
9911         struct hwrm_port_phy_cfg_input *req;
9912         int rc;
9913
9914         if (!BNXT_SINGLE_PF(bp))
9915                 return 0;
9916
9917         if (pci_num_vf(bp->pdev) &&
9918             !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9919                 return 0;
9920
9921         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9922         if (rc)
9923                 return rc;
9924
9925         req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9926         rc = hwrm_req_send(bp, req);
9927         if (!rc) {
9928                 mutex_lock(&bp->link_lock);
9929                 /* The device is not obliged to bring the link down in certain
9930                  * scenarios, even when forced.  Setting the state to unknown is
9931                  * consistent with driver startup and will force the link state
9932                  * to be reported during a subsequent open based on PORT_PHY_QCFG.
9933                  */
9934                 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9935                 mutex_unlock(&bp->link_lock);
9936         }
9937         return rc;
9938 }
9939
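/* Ask the OP-TEE secure OS to reload the firmware image.  Only available
 * when the kernel is built with CONFIG_TEE_BNXT_FW.
 */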
9940 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9941 {
9942 #ifdef CONFIG_TEE_BNXT_FW
9943         int rc = tee_bnxt_fw_load();
9944
9945         if (rc)
9946                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9947
9948         return rc;
9949 #else
9950         netdev_err(bp->dev, "OP-TEE not supported\n");
9951         return -ENODEV;
9952 #endif
9953 }
9954
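/* Poll the firmware health register until the firmware is no longer
 * booting or recovering, or until BNXT_FW_RETRY HWRM polls have returned
 * -EBUSY.  Report the unhealthy status, or, if the firmware crashed with
 * no master function, attempt a reset via OP-TEE.
 */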
9955 static int bnxt_try_recover_fw(struct bnxt *bp)
9956 {
9957         if (bp->fw_health && bp->fw_health->status_reliable) {
9958                 int retry = 0, rc;
9959                 u32 sts;
9960
9961                 do {
9962                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9963                         rc = bnxt_hwrm_poll(bp);
9964                         if (!BNXT_FW_IS_BOOTING(sts) &&
9965                             !BNXT_FW_IS_RECOVERING(sts))
9966                                 break;
9967                         retry++;
9968                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9969
9970                 if (!BNXT_FW_IS_HEALTHY(sts)) {
9971                         netdev_err(bp->dev,
9972                                    "Firmware not responding, status: 0x%x\n",
9973                                    sts);
9974                         rc = -ENODEV;
9975                 }
9976                 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9977                         netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9978                         return bnxt_fw_reset_via_optee(bp);
9979                 }
9980                 return rc;
9981         }
9982
9983         return -ENODEV;
9984 }
9985
9986 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
9987 {
9988         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9989
9990         if (!BNXT_NEW_RM(bp))
9991                 return; /* no resource reservations required */
9992
9993         hw_resc->resv_cp_rings = 0;
9994         hw_resc->resv_stat_ctxs = 0;
9995         hw_resc->resv_irqs = 0;
9996         hw_resc->resv_tx_rings = 0;
9997         hw_resc->resv_rx_rings = 0;
9998         hw_resc->resv_hw_ring_grps = 0;
9999         hw_resc->resv_vnics = 0;
10000         if (!fw_reset) {
10001                 bp->tx_nr_rings = 0;
10002                 bp->rx_nr_rings = 0;
10003         }
10004 }
10005
10006 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
10007 {
10008         int rc;
10009
10010         if (!BNXT_NEW_RM(bp))
10011                 return 0; /* no resource reservations required */
10012
10013         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
10014         if (rc)
10015                 netdev_err(bp->dev, "resc_qcaps failed\n");
10016
10017         bnxt_clear_reservations(bp, fw_reset);
10018
10019         return rc;
10020 }
10021
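/* Tell the firmware that the driver's view of the interface is changing
 * (up/down).  The request is retried on -EAGAIN up to BNXT_FW_IF_RETRY
 * times with a 50 ms delay.  The response flags indicate whether resources
 * changed or a hot firmware reset completed while the interface was down,
 * in which case context memory is rebuilt, interrupts are re-initialized,
 * and ring reservations are cancelled.
 */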
10022 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
10023 {
10024         struct hwrm_func_drv_if_change_output *resp;
10025         struct hwrm_func_drv_if_change_input *req;
10026         bool fw_reset = !bp->irq_tbl;
10027         bool resc_reinit = false;
10028         int rc, retry = 0;
10029         u32 flags = 0;
10030
10031         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
10032                 return 0;
10033
10034         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
10035         if (rc)
10036                 return rc;
10037
10038         if (up)
10039                 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
10040         resp = hwrm_req_hold(bp, req);
10041
10042         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10043         while (retry < BNXT_FW_IF_RETRY) {
10044                 rc = hwrm_req_send(bp, req);
10045                 if (rc != -EAGAIN)
10046                         break;
10047
10048                 msleep(50);
10049                 retry++;
10050         }
10051
10052         if (rc == -EAGAIN) {
10053                 hwrm_req_drop(bp, req);
10054                 return rc;
10055         } else if (!rc) {
10056                 flags = le32_to_cpu(resp->flags);
10057         } else if (up) {
10058                 rc = bnxt_try_recover_fw(bp);
10059                 fw_reset = true;
10060         }
10061         hwrm_req_drop(bp, req);
10062         if (rc)
10063                 return rc;
10064
10065         if (!up) {
10066                 bnxt_inv_fw_health_reg(bp);
10067                 return 0;
10068         }
10069
10070         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
10071                 resc_reinit = true;
10072         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
10073             test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
10074                 fw_reset = true;
10075         else
10076                 bnxt_remap_fw_health_regs(bp);
10077
10078         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
10079                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
10080                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10081                 return -ENODEV;
10082         }
10083         if (resc_reinit || fw_reset) {
10084                 if (fw_reset) {
10085                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10086                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10087                                 bnxt_ulp_stop(bp);
10088                         bnxt_free_ctx_mem(bp);
10089                         kfree(bp->ctx);
10090                         bp->ctx = NULL;
10091                         bnxt_dcb_free(bp);
10092                         rc = bnxt_fw_init_one(bp);
10093                         if (rc) {
10094                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10095                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10096                                 return rc;
10097                         }
10098                         bnxt_clear_int_mode(bp);
10099                         rc = bnxt_init_int_mode(bp);
10100                         if (rc) {
10101                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10102                                 netdev_err(bp->dev, "init int mode failed\n");
10103                                 return rc;
10104                         }
10105                 }
10106                 rc = bnxt_cancel_reservations(bp, fw_reset);
10107         }
10108         return rc;
10109 }
10110
10111 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
10112 {
10113         struct hwrm_port_led_qcaps_output *resp;
10114         struct hwrm_port_led_qcaps_input *req;
10115         struct bnxt_pf_info *pf = &bp->pf;
10116         int rc;
10117
10118         bp->num_leds = 0;
10119         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
10120                 return 0;
10121
10122         rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
10123         if (rc)
10124                 return rc;
10125
10126         req->port_id = cpu_to_le16(pf->port_id);
10127         resp = hwrm_req_hold(bp, req);
10128         rc = hwrm_req_send(bp, req);
10129         if (rc) {
10130                 hwrm_req_drop(bp, req);
10131                 return rc;
10132         }
10133         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
10134                 int i;
10135
10136                 bp->num_leds = resp->num_leds;
10137                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
10138                                                  bp->num_leds);
10139                 for (i = 0; i < bp->num_leds; i++) {
10140                         struct bnxt_led_info *led = &bp->leds[i];
10141                         __le16 caps = led->led_state_caps;
10142
10143                         if (!led->led_group_id ||
10144                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
10145                                 bp->num_leds = 0;
10146                                 break;
10147                         }
10148                 }
10149         }
10150         hwrm_req_drop(bp, req);
10151         return 0;
10152 }
10153
10154 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
10155 {
10156         struct hwrm_wol_filter_alloc_output *resp;
10157         struct hwrm_wol_filter_alloc_input *req;
10158         int rc;
10159
10160         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10161         if (rc)
10162                 return rc;
10163
10164         req->port_id = cpu_to_le16(bp->pf.port_id);
10165         req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
10166         req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
10167         memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10168
10169         resp = hwrm_req_hold(bp, req);
10170         rc = hwrm_req_send(bp, req);
10171         if (!rc)
10172                 bp->wol_filter_id = resp->wol_filter_id;
10173         hwrm_req_drop(bp, req);
10174         return rc;
10175 }
10176
10177 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10178 {
10179         struct hwrm_wol_filter_free_input *req;
10180         int rc;
10181
10182         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10183         if (rc)
10184                 return rc;
10185
10186         req->port_id = cpu_to_le16(bp->pf.port_id);
10187         req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
10188         req->wol_filter_id = bp->wol_filter_id;
10189
10190         return hwrm_req_send(bp, req);
10191 }
10192
10193 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10194 {
10195         struct hwrm_wol_filter_qcfg_output *resp;
10196         struct hwrm_wol_filter_qcfg_input *req;
10197         u16 next_handle = 0;
10198         int rc;
10199
10200         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10201         if (rc)
10202                 return rc;
10203
10204         req->port_id = cpu_to_le16(bp->pf.port_id);
10205         req->handle = cpu_to_le16(handle);
10206         resp = hwrm_req_hold(bp, req);
10207         rc = hwrm_req_send(bp, req);
10208         if (!rc) {
10209                 next_handle = le16_to_cpu(resp->next_handle);
10210                 if (next_handle != 0) {
10211                         if (resp->wol_type ==
10212                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10213                                 bp->wol = 1;
10214                                 bp->wol_filter_id = resp->wol_filter_id;
10215                         }
10216                 }
10217         }
10218         hwrm_req_drop(bp, req);
10219         return next_handle;
10220 }
10221
10222 static void bnxt_get_wol_settings(struct bnxt *bp)
10223 {
10224         u16 handle = 0;
10225
10226         bp->wol = 0;
10227         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10228                 return;
10229
10230         do {
10231                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10232         } while (handle && handle != 0xffff);
10233 }
10234
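/* Expose the chip temperature sensor through hwmon.  The firmware reports
 * whole degrees Celsius; the hwmon sysfs ABI expects millidegrees, hence
 * the multiplication by 1000 below.
 */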
10235 #ifdef CONFIG_BNXT_HWMON
10236 static ssize_t bnxt_show_temp(struct device *dev,
10237                               struct device_attribute *devattr, char *buf)
10238 {
10239         struct hwrm_temp_monitor_query_output *resp;
10240         struct hwrm_temp_monitor_query_input *req;
10241         struct bnxt *bp = dev_get_drvdata(dev);
10242         u32 len = 0;
10243         int rc;
10244
10245         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10246         if (rc)
10247                 return rc;
10248         resp = hwrm_req_hold(bp, req);
10249         rc = hwrm_req_send(bp, req);
10250         if (!rc)
10251                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
10252         hwrm_req_drop(bp, req);
10253         if (rc)
10254                 return rc;
10255         return len;
10256 }
10257 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10258
10259 static struct attribute *bnxt_attrs[] = {
10260         &sensor_dev_attr_temp1_input.dev_attr.attr,
10261         NULL
10262 };
10263 ATTRIBUTE_GROUPS(bnxt);
10264
10265 static void bnxt_hwmon_close(struct bnxt *bp)
10266 {
10267         if (bp->hwmon_dev) {
10268                 hwmon_device_unregister(bp->hwmon_dev);
10269                 bp->hwmon_dev = NULL;
10270         }
10271 }
10272
10273 static void bnxt_hwmon_open(struct bnxt *bp)
10274 {
10275         struct hwrm_temp_monitor_query_input *req;
10276         struct pci_dev *pdev = bp->pdev;
10277         int rc;
10278
10279         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10280         if (!rc)
10281                 rc = hwrm_req_send_silent(bp, req);
10282         if (rc == -EACCES || rc == -EOPNOTSUPP) {
10283                 bnxt_hwmon_close(bp);
10284                 return;
10285         }
10286
10287         if (bp->hwmon_dev)
10288                 return;
10289
10290         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10291                                                           DRV_MODULE_NAME, bp,
10292                                                           bnxt_groups);
10293         if (IS_ERR(bp->hwmon_dev)) {
10294                 bp->hwmon_dev = NULL;
10295                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10296         }
10297 }
10298 #else
10299 static void bnxt_hwmon_close(struct bnxt *bp)
10300 {
10301 }
10302
10303 static void bnxt_hwmon_open(struct bnxt *bp)
10304 {
10305 }
10306 #endif
10307
10308 static bool bnxt_eee_config_ok(struct bnxt *bp)
10309 {
10310         struct ethtool_eee *eee = &bp->eee;
10311         struct bnxt_link_info *link_info = &bp->link_info;
10312
10313         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10314                 return true;
10315
10316         if (eee->eee_enabled) {
10317                 u32 advertising =
10318                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10319
10320                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10321                         eee->eee_enabled = 0;
10322                         return false;
10323                 }
10324                 if (eee->advertised & ~advertising) {
10325                         eee->advertised = advertising & eee->supported;
10326                         return false;
10327                 }
10328         }
10329         return true;
10330 }
10331
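/* Compare the requested PHY settings against the state just queried from
 * the firmware and issue PORT_PHY_CFG only for what actually changed:
 * pause, link speed/duplex/autoneg, or EEE.
 */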
10332 static int bnxt_update_phy_setting(struct bnxt *bp)
10333 {
10334         int rc;
10335         bool update_link = false;
10336         bool update_pause = false;
10337         bool update_eee = false;
10338         struct bnxt_link_info *link_info = &bp->link_info;
10339
10340         rc = bnxt_update_link(bp, true);
10341         if (rc) {
10342                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10343                            rc);
10344                 return rc;
10345         }
10346         if (!BNXT_SINGLE_PF(bp))
10347                 return 0;
10348
10349         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10350             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10351             link_info->req_flow_ctrl)
10352                 update_pause = true;
10353         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10354             link_info->force_pause_setting != link_info->req_flow_ctrl)
10355                 update_pause = true;
10356         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10357                 if (BNXT_AUTO_MODE(link_info->auto_mode))
10358                         update_link = true;
10359                 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10360                     link_info->req_link_speed != link_info->force_link_speed)
10361                         update_link = true;
10362                 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10363                          link_info->req_link_speed != link_info->force_pam4_link_speed)
10364                         update_link = true;
10365                 if (link_info->req_duplex != link_info->duplex_setting)
10366                         update_link = true;
10367         } else {
10368                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10369                         update_link = true;
10370                 if (link_info->advertising != link_info->auto_link_speeds ||
10371                     link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10372                         update_link = true;
10373         }
10374
10375         /* The last close may have shut down the link, so we need to call
10376          * PHY_CFG to bring it back up.
10377          */
10378         if (!BNXT_LINK_IS_UP(bp))
10379                 update_link = true;
10380
10381         if (!bnxt_eee_config_ok(bp))
10382                 update_eee = true;
10383
10384         if (update_link)
10385                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10386         else if (update_pause)
10387                 rc = bnxt_hwrm_set_pause(bp);
10388         if (rc) {
10389                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10390                            rc);
10391                 return rc;
10392         }
10393
10394         return rc;
10395 }
10396
10397 /* Common routine to pre-map certain register blocks into different GRC windows.
10398  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
10399  * in a PF and 3 windows in a VF can be customized to map different register
10400  * blocks.
10401  */
10402 static void bnxt_preset_reg_win(struct bnxt *bp)
10403 {
10404         if (BNXT_PF(bp)) {
10405                 /* CAG registers map to GRC window #4 */
10406                 writel(BNXT_CAG_REG_BASE,
10407                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10408         }
10409 }
10410
10411 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10412
10413 static int bnxt_reinit_after_abort(struct bnxt *bp)
10414 {
10415         int rc;
10416
10417         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10418                 return -EBUSY;
10419
10420         if (bp->dev->reg_state == NETREG_UNREGISTERED)
10421                 return -ENODEV;
10422
10423         rc = bnxt_fw_init_one(bp);
10424         if (!rc) {
10425                 bnxt_clear_int_mode(bp);
10426                 rc = bnxt_init_int_mode(bp);
10427                 if (!rc) {
10428                         clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10429                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10430                 }
10431         }
10432         return rc;
10433 }
10434
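/* Bring the NIC up: reserve rings, allocate ring/context memory, set up
 * NAPI and IRQs, program the NIC, then restore PHY settings and enable
 * interrupts and the TX queues.  On failure, everything allocated so far
 * is torn down.
 */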
10435 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10436 {
10437         int rc = 0;
10438
10439         bnxt_preset_reg_win(bp);
10440         netif_carrier_off(bp->dev);
10441         if (irq_re_init) {
10442                 /* Reserve rings now if none were reserved at driver probe. */
10443                 rc = bnxt_init_dflt_ring_mode(bp);
10444                 if (rc) {
10445                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10446                         return rc;
10447                 }
10448         }
10449         rc = bnxt_reserve_rings(bp, irq_re_init);
10450         if (rc)
10451                 return rc;
10452         if ((bp->flags & BNXT_FLAG_RFS) &&
10453             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10454                 /* disable RFS if falling back to INTA */
10455                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10456                 bp->flags &= ~BNXT_FLAG_RFS;
10457         }
10458
10459         rc = bnxt_alloc_mem(bp, irq_re_init);
10460         if (rc) {
10461                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10462                 goto open_err_free_mem;
10463         }
10464
10465         if (irq_re_init) {
10466                 bnxt_init_napi(bp);
10467                 rc = bnxt_request_irq(bp);
10468                 if (rc) {
10469                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10470                         goto open_err_irq;
10471                 }
10472         }
10473
10474         rc = bnxt_init_nic(bp, irq_re_init);
10475         if (rc) {
10476                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10477                 goto open_err_irq;
10478         }
10479
10480         bnxt_enable_napi(bp);
10481         bnxt_debug_dev_init(bp);
10482
10483         if (link_re_init) {
10484                 mutex_lock(&bp->link_lock);
10485                 rc = bnxt_update_phy_setting(bp);
10486                 mutex_unlock(&bp->link_lock);
10487                 if (rc) {
10488                         netdev_warn(bp->dev, "failed to update phy settings\n");
10489                         if (BNXT_SINGLE_PF(bp)) {
10490                                 bp->link_info.phy_retry = true;
10491                                 bp->link_info.phy_retry_expires =
10492                                         jiffies + 5 * HZ;
10493                         }
10494                 }
10495         }
10496
10497         if (irq_re_init)
10498                 udp_tunnel_nic_reset_ntf(bp->dev);
10499
10500         if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
10501                 if (!static_key_enabled(&bnxt_xdp_locking_key))
10502                         static_branch_enable(&bnxt_xdp_locking_key);
10503         } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
10504                 static_branch_disable(&bnxt_xdp_locking_key);
10505         }
10506         set_bit(BNXT_STATE_OPEN, &bp->state);
10507         bnxt_enable_int(bp);
10508         /* Enable TX queues */
10509         bnxt_tx_enable(bp);
10510         mod_timer(&bp->timer, jiffies + bp->current_interval);
10511         /* Poll link status and check for SFP+ module status */
10512         mutex_lock(&bp->link_lock);
10513         bnxt_get_port_module_status(bp);
10514         mutex_unlock(&bp->link_lock);
10515
10516         /* VF-reps may need to be re-opened after the PF is re-opened */
10517         if (BNXT_PF(bp))
10518                 bnxt_vf_reps_open(bp);
10519         bnxt_ptp_init_rtc(bp, true);
10520         bnxt_ptp_cfg_tstamp_filters(bp);
10521         return 0;
10522
10523 open_err_irq:
10524         bnxt_del_napi(bp);
10525
10526 open_err_free_mem:
10527         bnxt_free_skbs(bp);
10528         bnxt_free_irq(bp);
10529         bnxt_free_mem(bp, true);
10530         return rc;
10531 }
10532
10533 /* rtnl_lock held */
10534 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10535 {
10536         int rc = 0;
10537
10538         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10539                 rc = -EIO;
10540         if (!rc)
10541                 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10542         if (rc) {
10543                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10544                 dev_close(bp->dev);
10545         }
10546         return rc;
10547 }
10548
10549 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10550  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10551  * self-tests.
10552  */
10553 int bnxt_half_open_nic(struct bnxt *bp)
10554 {
10555         int rc = 0;
10556
10557         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10558                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10559                 rc = -ENODEV;
10560                 goto half_open_err;
10561         }
10562
10563         rc = bnxt_alloc_mem(bp, true);
10564         if (rc) {
10565                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10566                 goto half_open_err;
10567         }
10568         set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10569         rc = bnxt_init_nic(bp, true);
10570         if (rc) {
10571                 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10572                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10573                 goto half_open_err;
10574         }
10575         return 0;
10576
10577 half_open_err:
10578         bnxt_free_skbs(bp);
10579         bnxt_free_mem(bp, true);
10580         dev_close(bp->dev);
10581         return rc;
10582 }
10583
10584 /* rtnl_lock held, this call can only be made after a previous successful
10585  * call to bnxt_half_open_nic().
10586  */
10587 void bnxt_half_close_nic(struct bnxt *bp)
10588 {
10589         bnxt_hwrm_resource_free(bp, false, true);
10590         bnxt_free_skbs(bp);
10591         bnxt_free_mem(bp, true);
10592         clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10593 }
10594
10595 void bnxt_reenable_sriov(struct bnxt *bp)
10596 {
10597         if (BNXT_PF(bp)) {
10598                 struct bnxt_pf_info *pf = &bp->pf;
10599                 int n = pf->active_vfs;
10600
10601                 if (n)
10602                         bnxt_cfg_hw_sriov(bp, &n, true);
10603         }
10604 }
10605
10606 static int bnxt_open(struct net_device *dev)
10607 {
10608         struct bnxt *bp = netdev_priv(dev);
10609         int rc;
10610
10611         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10612                 rc = bnxt_reinit_after_abort(bp);
10613                 if (rc) {
10614                         if (rc == -EBUSY)
10615                                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10616                         else
10617                                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10618                         return -ENODEV;
10619                 }
10620         }
10621
10622         rc = bnxt_hwrm_if_change(bp, true);
10623         if (rc)
10624                 return rc;
10625
10626         rc = __bnxt_open_nic(bp, true, true);
10627         if (rc) {
10628                 bnxt_hwrm_if_change(bp, false);
10629         } else {
10630                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10631                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10632                                 bnxt_ulp_start(bp, 0);
10633                                 bnxt_reenable_sriov(bp);
10634                         }
10635                 }
10636                 bnxt_hwmon_open(bp);
10637         }
10638
10639         return rc;
10640 }
10641
10642 static bool bnxt_drv_busy(struct bnxt *bp)
10643 {
10644         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10645                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10646 }
10647
10648 static void bnxt_get_ring_stats(struct bnxt *bp,
10649                                 struct rtnl_link_stats64 *stats);
10650
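/* Tear down in the reverse order of open.  TX is stopped first and
 * BNXT_STATE_OPEN is cleared before waiting for in-flight stats reads and
 * slow-path work to drain (the barrier pairs with the one in
 * bnxt_get_stats64()), then rings are flushed and memory is freed.
 */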
10651 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10652                              bool link_re_init)
10653 {
10654         /* Close the VF-reps before closing PF */
10655         if (BNXT_PF(bp))
10656                 bnxt_vf_reps_close(bp);
10657
10658         /* Change device state to avoid TX queue wake-ups */
10659         bnxt_tx_disable(bp);
10660
10661         clear_bit(BNXT_STATE_OPEN, &bp->state);
10662         smp_mb__after_atomic();
10663         while (bnxt_drv_busy(bp))
10664                 msleep(20);
10665
10666         /* Flush rings and disable interrupts */
10667         bnxt_shutdown_nic(bp, irq_re_init);
10668
10669         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10670
10671         bnxt_debug_dev_exit(bp);
10672         bnxt_disable_napi(bp);
10673         del_timer_sync(&bp->timer);
10674         bnxt_free_skbs(bp);
10675
10676         /* Save ring stats before shutdown */
10677         if (bp->bnapi && irq_re_init)
10678                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10679         if (irq_re_init) {
10680                 bnxt_free_irq(bp);
10681                 bnxt_del_napi(bp);
10682         }
10683         bnxt_free_mem(bp, irq_re_init);
10684 }
10685
10686 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10687 {
10688         int rc = 0;
10689
10690         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10691                 /* If we get here, it means firmware reset is in progress
10692                  * while we are trying to close.  We can safely proceed with
10693                  * the close because we are holding rtnl_lock().  Some firmware
10694                  * messages may fail as we proceed to close.  We set the
10695                  * ABORT_ERR flag here so that the FW reset thread will later
10696                  * abort when it gets the rtnl_lock() and sees the flag.
10697                  */
10698                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10699                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10700         }
10701
10702 #ifdef CONFIG_BNXT_SRIOV
10703         if (bp->sriov_cfg) {
10704                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10705                                                       !bp->sriov_cfg,
10706                                                       BNXT_SRIOV_CFG_WAIT_TMO);
10707                 if (rc)
10708                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10709         }
10710 #endif
10711         __bnxt_close_nic(bp, irq_re_init, link_re_init);
10712         return rc;
10713 }
10714
10715 static int bnxt_close(struct net_device *dev)
10716 {
10717         struct bnxt *bp = netdev_priv(dev);
10718
10719         bnxt_hwmon_close(bp);
10720         bnxt_close_nic(bp, true, true);
10721         bnxt_hwrm_shutdown_link(bp);
10722         bnxt_hwrm_if_change(bp, false);
10723         return 0;
10724 }
10725
10726 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10727                                    u16 *val)
10728 {
10729         struct hwrm_port_phy_mdio_read_output *resp;
10730         struct hwrm_port_phy_mdio_read_input *req;
10731         int rc;
10732
10733         if (bp->hwrm_spec_code < 0x10a00)
10734                 return -EOPNOTSUPP;
10735
10736         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10737         if (rc)
10738                 return rc;
10739
10740         req->port_id = cpu_to_le16(bp->pf.port_id);
10741         req->phy_addr = phy_addr;
10742         req->reg_addr = cpu_to_le16(reg & 0x1f);
10743         if (mdio_phy_id_is_c45(phy_addr)) {
10744                 req->cl45_mdio = 1;
10745                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10746                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10747                 req->reg_addr = cpu_to_le16(reg);
10748         }
10749
10750         resp = hwrm_req_hold(bp, req);
10751         rc = hwrm_req_send(bp, req);
10752         if (!rc)
10753                 *val = le16_to_cpu(resp->reg_data);
10754         hwrm_req_drop(bp, req);
10755         return rc;
10756 }
10757
10758 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10759                                     u16 val)
10760 {
10761         struct hwrm_port_phy_mdio_write_input *req;
10762         int rc;
10763
10764         if (bp->hwrm_spec_code < 0x10a00)
10765                 return -EOPNOTSUPP;
10766
10767         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10768         if (rc)
10769                 return rc;
10770
10771         req->port_id = cpu_to_le16(bp->pf.port_id);
10772         req->phy_addr = phy_addr;
10773         req->reg_addr = cpu_to_le16(reg & 0x1f);
10774         if (mdio_phy_id_is_c45(phy_addr)) {
10775                 req->cl45_mdio = 1;
10776                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10777                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10778                 req->reg_addr = cpu_to_le16(reg);
10779         }
10780         req->reg_data = cpu_to_le16(val);
10781
10782         return hwrm_req_send(bp, req);
10783 }
10784
10785 /* rtnl_lock held */
10786 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10787 {
10788         struct mii_ioctl_data *mdio = if_mii(ifr);
10789         struct bnxt *bp = netdev_priv(dev);
10790         int rc;
10791
10792         switch (cmd) {
10793         case SIOCGMIIPHY:
10794                 mdio->phy_id = bp->link_info.phy_addr;
10795
10796                 fallthrough;
10797         case SIOCGMIIREG: {
10798                 u16 mii_regval = 0;
10799
10800                 if (!netif_running(dev))
10801                         return -EAGAIN;
10802
10803                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10804                                              &mii_regval);
10805                 mdio->val_out = mii_regval;
10806                 return rc;
10807         }
10808
10809         case SIOCSMIIREG:
10810                 if (!netif_running(dev))
10811                         return -EAGAIN;
10812
10813                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10814                                                 mdio->val_in);
10815
10816         case SIOCSHWTSTAMP:
10817                 return bnxt_hwtstamp_set(dev, ifr);
10818
10819         case SIOCGHWTSTAMP:
10820                 return bnxt_hwtstamp_get(dev, ifr);
10821
10822         default:
10823                 /* do nothing */
10824                 break;
10825         }
10826         return -EOPNOTSUPP;
10827 }
10828
10829 static void bnxt_get_ring_stats(struct bnxt *bp,
10830                                 struct rtnl_link_stats64 *stats)
10831 {
10832         int i;
10833
10834         for (i = 0; i < bp->cp_nr_rings; i++) {
10835                 struct bnxt_napi *bnapi = bp->bnapi[i];
10836                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10837                 u64 *sw = cpr->stats.sw_stats;
10838
10839                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10840                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10841                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10842
10843                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10844                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10845                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10846
10847                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10848                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10849                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10850
10851                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10852                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10853                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10854
10855                 stats->rx_missed_errors +=
10856                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10857
10858                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10859
10860                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10861
10862                 stats->rx_dropped +=
10863                         cpr->sw_stats.rx.rx_netpoll_discards +
10864                         cpr->sw_stats.rx.rx_oom_discards;
10865         }
10866 }
10867
10868 static void bnxt_add_prev_stats(struct bnxt *bp,
10869                                 struct rtnl_link_stats64 *stats)
10870 {
10871         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10872
10873         stats->rx_packets += prev_stats->rx_packets;
10874         stats->tx_packets += prev_stats->tx_packets;
10875         stats->rx_bytes += prev_stats->rx_bytes;
10876         stats->tx_bytes += prev_stats->tx_bytes;
10877         stats->rx_missed_errors += prev_stats->rx_missed_errors;
10878         stats->multicast += prev_stats->multicast;
10879         stats->rx_dropped += prev_stats->rx_dropped;
10880         stats->tx_dropped += prev_stats->tx_dropped;
10881 }
10882
10883 static void
10884 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10885 {
10886         struct bnxt *bp = netdev_priv(dev);
10887
10888         set_bit(BNXT_STATE_READ_STATS, &bp->state);
10889         /* Make sure bnxt_close_nic() sees that we are reading stats before
10890          * we check the BNXT_STATE_OPEN flag.
10891          */
10892         smp_mb__after_atomic();
10893         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10894                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10895                 *stats = bp->net_stats_prev;
10896                 return;
10897         }
10898
10899         bnxt_get_ring_stats(bp, stats);
10900         bnxt_add_prev_stats(bp, stats);
10901
10902         if (bp->flags & BNXT_FLAG_PORT_STATS) {
10903                 u64 *rx = bp->port_stats.sw_stats;
10904                 u64 *tx = bp->port_stats.sw_stats +
10905                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10906
10907                 stats->rx_crc_errors =
10908                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10909                 stats->rx_frame_errors =
10910                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10911                 stats->rx_length_errors =
10912                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10913                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10914                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10915                 stats->rx_errors =
10916                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10917                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10918                 stats->collisions =
10919                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10920                 stats->tx_fifo_errors =
10921                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10922                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10923         }
10924         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10925 }
10926
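/* Sync the netdev multicast list into vnic 0's mc_list and report whether
 * anything changed.  If the list exceeds BNXT_MAX_MC_ADDRS, fall back to
 * receiving all multicast instead of programming individual entries.
 */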
10927 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10928 {
10929         struct net_device *dev = bp->dev;
10930         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10931         struct netdev_hw_addr *ha;
10932         u8 *haddr;
10933         int mc_count = 0;
10934         bool update = false;
10935         int off = 0;
10936
10937         netdev_for_each_mc_addr(ha, dev) {
10938                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10939                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10940                         vnic->mc_list_count = 0;
10941                         return false;
10942                 }
10943                 haddr = ha->addr;
10944                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10945                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10946                         update = true;
10947                 }
10948                 off += ETH_ALEN;
10949                 mc_count++;
10950         }
10951         if (mc_count)
10952                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10953
10954         if (mc_count != vnic->mc_list_count) {
10955                 vnic->mc_list_count = mc_count;
10956                 update = true;
10957         }
10958         return update;
10959 }
10960
10961 static bool bnxt_uc_list_updated(struct bnxt *bp)
10962 {
10963         struct net_device *dev = bp->dev;
10964         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10965         struct netdev_hw_addr *ha;
10966         int off = 0;
10967
10968         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10969                 return true;
10970
10971         netdev_for_each_uc_addr(ha, dev) {
10972                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10973                         return true;
10974
10975                 off += ETH_ALEN;
10976         }
10977         return false;
10978 }
10979
10980 static void bnxt_set_rx_mode(struct net_device *dev)
10981 {
10982         struct bnxt *bp = netdev_priv(dev);
10983         struct bnxt_vnic_info *vnic;
10984         bool mc_update = false;
10985         bool uc_update;
10986         u32 mask;
10987
10988         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10989                 return;
10990
10991         vnic = &bp->vnic_info[0];
10992         mask = vnic->rx_mask;
10993         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10994                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10995                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10996                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10997
10998         if (dev->flags & IFF_PROMISC)
10999                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11000
11001         uc_update = bnxt_uc_list_updated(bp);
11002
11003         if (dev->flags & IFF_BROADCAST)
11004                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11005         if (dev->flags & IFF_ALLMULTI) {
11006                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11007                 vnic->mc_list_count = 0;
11008         } else if (dev->flags & IFF_MULTICAST) {
11009                 mc_update = bnxt_mc_list_updated(bp, &mask);
11010         }
11011
11012         if (mask != vnic->rx_mask || uc_update || mc_update) {
11013                 vnic->rx_mask = mask;
11014
11015                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11016                 bnxt_queue_sp_work(bp);
11017         }
11018 }
11019
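/* Re-program the unicast filter list (entry 0 appears to be the default
 * MAC address, so the loops start at index 1), falling back to promiscuous
 * mode when there are more addresses than BNXT_MAX_UC_ADDRS, then apply
 * the RX mask.  A multicast failure degrades to ALL_MCAST rather than
 * failing the whole operation.
 */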
11020 static int bnxt_cfg_rx_mode(struct bnxt *bp)
11021 {
11022         struct net_device *dev = bp->dev;
11023         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11024         struct hwrm_cfa_l2_filter_free_input *req;
11025         struct netdev_hw_addr *ha;
11026         int i, off = 0, rc;
11027         bool uc_update;
11028
11029         netif_addr_lock_bh(dev);
11030         uc_update = bnxt_uc_list_updated(bp);
11031         netif_addr_unlock_bh(dev);
11032
11033         if (!uc_update)
11034                 goto skip_uc;
11035
11036         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
11037         if (rc)
11038                 return rc;
11039         hwrm_req_hold(bp, req);
11040         for (i = 1; i < vnic->uc_filter_count; i++) {
11041                 req->l2_filter_id = vnic->fw_l2_filter_id[i];
11042
11043                 rc = hwrm_req_send(bp, req);
11044         }
11045         hwrm_req_drop(bp, req);
11046
11047         vnic->uc_filter_count = 1;
11048
11049         netif_addr_lock_bh(dev);
11050         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
11051                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11052         } else {
11053                 netdev_for_each_uc_addr(ha, dev) {
11054                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
11055                         off += ETH_ALEN;
11056                         vnic->uc_filter_count++;
11057                 }
11058         }
11059         netif_addr_unlock_bh(dev);
11060
11061         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
11062                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11063                 if (rc) {
11064                         if (BNXT_VF(bp) && rc == -ENODEV) {
11065                                 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11066                                         netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
11067                                 else
11068                                         netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
11069                                 rc = 0;
11070                         } else {
11071                                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11072                         }
11073                         vnic->uc_filter_count = i;
11074                         return rc;
11075                 }
11076         }
11077         if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11078                 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
11079
11080 skip_uc:
11081         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
11082             !bnxt_promisc_ok(bp))
11083                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11084         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11085         if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
11086                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
11087                             rc);
11088                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
11089                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11090                 vnic->mc_list_count = 0;
11091                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11092         }
11093         if (rc)
11094                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
11095                            rc);
11096
11097         return rc;
11098 }
11099
11100 static bool bnxt_can_reserve_rings(struct bnxt *bp)
11101 {
11102 #ifdef CONFIG_BNXT_SRIOV
11103         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
11104                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11105
11106                 /* If no minimum rings were provisioned by the PF, don't
11107                  * reserve rings by default while the device is down.
11108                  */
11109                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
11110                         return true;
11111
11112                 if (!netif_running(bp->dev))
11113                         return false;
11114         }
11115 #endif
11116         return true;
11117 }
11118
11119 /* If the chip and firmware support RFS */
11120 static bool bnxt_rfs_supported(struct bnxt *bp)
11121 {
11122         if (bp->flags & BNXT_FLAG_CHIP_P5) {
11123                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
11124                         return true;
11125                 return false;
11126         }
11127         /* 212 firmware is broken for aRFS */
11128         if (BNXT_FW_MAJ(bp) == 212)
11129                 return false;
11130         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
11131                 return true;
11132         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11133                 return true;
11134         return false;
11135 }
11136
11137 /* If runtime conditions support RFS */
11138 static bool bnxt_rfs_capable(struct bnxt *bp)
11139 {
11140 #ifdef CONFIG_RFS_ACCEL
11141         int vnics, max_vnics, max_rss_ctxs;
11142
11143         if (bp->flags & BNXT_FLAG_CHIP_P5)
11144                 return bnxt_rfs_supported(bp);
11145         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
11146                 return false;
11147
11148         vnics = 1 + bp->rx_nr_rings;
11149         max_vnics = bnxt_get_max_func_vnics(bp);
11150         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
11151
11152         /* RSS contexts not a limiting factor */
11153         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11154                 max_rss_ctxs = max_vnics;
11155         if (vnics > max_vnics || vnics > max_rss_ctxs) {
11156                 if (bp->rx_nr_rings > 1)
11157                         netdev_warn(bp->dev,
11158                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
11159                                     min(max_rss_ctxs - 1, max_vnics - 1));
11160                 return false;
11161         }
11162
11163         if (!BNXT_NEW_RM(bp))
11164                 return true;
11165
11166         if (vnics == bp->hw_resc.resv_vnics)
11167                 return true;
11168
11169         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
11170         if (vnics <= bp->hw_resc.resv_vnics)
11171                 return true;
11172
11173         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
11174         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
11175         return false;
11176 #else
11177         return false;
11178 #endif
11179 }
11180
11181 static netdev_features_t bnxt_fix_features(struct net_device *dev,
11182                                            netdev_features_t features)
11183 {
11184         struct bnxt *bp = netdev_priv(dev);
11185         netdev_features_t vlan_features;
11186
11187         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
11188                 features &= ~NETIF_F_NTUPLE;
11189
11190         if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
11191                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11192
11193         if (!(features & NETIF_F_GRO))
11194                 features &= ~NETIF_F_GRO_HW;
11195
11196         if (features & NETIF_F_GRO_HW)
11197                 features &= ~NETIF_F_LRO;
11198
11199         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
11200          * turned on or off together.
11201          */
11202         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
11203         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
11204                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11205                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11206                 else if (vlan_features)
11207                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
11208         }
11209 #ifdef CONFIG_BNXT_SRIOV
11210         if (BNXT_VF(bp) && bp->vf.vlan)
11211                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11212 #endif
11213         return features;
11214 }
11215
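/* .ndo_set_features handler.  A TPA-only change can be applied on the
 * fly with bnxt_set_tpa(); any other flag change (or a TPA change on
 * P5 chips) requires a full close/open cycle to re-init the rings.
 */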
11216 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
11217 {
11218         struct bnxt *bp = netdev_priv(dev);
11219         u32 flags = bp->flags;
11220         u32 changes;
11221         int rc = 0;
11222         bool re_init = false;
11223         bool update_tpa = false;
11224
11225         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
11226         if (features & NETIF_F_GRO_HW)
11227                 flags |= BNXT_FLAG_GRO;
11228         else if (features & NETIF_F_LRO)
11229                 flags |= BNXT_FLAG_LRO;
11230
11231         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11232                 flags &= ~BNXT_FLAG_TPA;
11233
11234         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11235                 flags |= BNXT_FLAG_STRIP_VLAN;
11236
11237         if (features & NETIF_F_NTUPLE)
11238                 flags |= BNXT_FLAG_RFS;
11239
11240         changes = flags ^ bp->flags;
11241         if (changes & BNXT_FLAG_TPA) {
11242                 update_tpa = true;
11243                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
11244                     (flags & BNXT_FLAG_TPA) == 0 ||
11245                     (bp->flags & BNXT_FLAG_CHIP_P5))
11246                         re_init = true;
11247         }
11248
11249         if (changes & ~BNXT_FLAG_TPA)
11250                 re_init = true;
11251
11252         if (flags != bp->flags) {
11253                 u32 old_flags = bp->flags;
11254
11255                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11256                         bp->flags = flags;
11257                         if (update_tpa)
11258                                 bnxt_set_ring_params(bp);
11259                         return rc;
11260                 }
11261
11262                 if (re_init) {
11263                         bnxt_close_nic(bp, false, false);
11264                         bp->flags = flags;
11265                         if (update_tpa)
11266                                 bnxt_set_ring_params(bp);
11267
11268                         return bnxt_open_nic(bp, false, false);
11269                 }
11270                 if (update_tpa) {
11271                         bp->flags = flags;
11272                         rc = bnxt_set_tpa(bp, !!(flags & BNXT_FLAG_TPA));
11275                         if (rc)
11276                                 bp->flags = old_flags;
11277                 }
11278         }
11279         return rc;
11280 }
11281
11282 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11283                               u8 **nextp)
11284 {
11285         struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11286         int hdr_count = 0;
11287         u8 *nexthdr;
11288         int start;
11289
11290         /* Check that there are at most 2 IPv6 extension headers, no
11291          * fragment header, and each is <= 64 bytes.
11292          */
11293         start = nw_off + sizeof(*ip6h);
11294         nexthdr = &ip6h->nexthdr;
11295         while (ipv6_ext_hdr(*nexthdr)) {
11296                 struct ipv6_opt_hdr *hp;
11297                 int hdrlen;
11298
11299                 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11300                     *nexthdr == NEXTHDR_FRAGMENT)
11301                         return false;
11302                 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11303                                           skb_headlen(skb), NULL);
11304                 if (!hp)
11305                         return false;
11306                 if (*nexthdr == NEXTHDR_AUTH)
11307                         hdrlen = ipv6_authlen(hp);
11308                 else
11309                         hdrlen = ipv6_optlen(hp);
11310
11311                 if (hdrlen > 64)
11312                         return false;
11313                 nexthdr = &hp->nexthdr;
11314                 start += hdrlen;
11315                 hdr_count++;
11316         }
11317         if (nextp) {
11318                 /* Caller will check inner protocol */
11319                 if (skb->encapsulation) {
11320                         *nextp = nexthdr;
11321                         return true;
11322                 }
11323                 *nextp = NULL;
11324         }
11325         /* Only support TCP/UDP for non-tunneled IPv6 and inner IPv6 */
11326         return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11327 }
11328
11329 /* For UDP, we can only handle 1 VXLAN port and 1 Geneve port. */
11330 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11331 {
11332         struct udphdr *uh = udp_hdr(skb);
11333         __be16 udp_port = uh->dest;
11334
11335         if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11336                 return false;
11337         if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11338                 struct ethhdr *eh = inner_eth_hdr(skb);
11339
11340                 switch (eh->h_proto) {
11341                 case htons(ETH_P_IP):
11342                         return true;
11343                 case htons(ETH_P_IPV6):
11344                         return bnxt_exthdr_check(bp, skb,
11345                                                  skb_inner_network_offset(skb),
11346                                                  NULL);
11347                 }
11348         }
11349         return false;
11350 }
11351
11352 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11353 {
11354         switch (l4_proto) {
11355         case IPPROTO_UDP:
11356                 return bnxt_udp_tunl_check(bp, skb);
11357         case IPPROTO_IPIP:
11358                 return true;
11359         case IPPROTO_GRE: {
11360                 switch (skb->inner_protocol) {
11361                 default:
11362                         return false;
11363                 case htons(ETH_P_IP):
11364                         return true;
11365                 case htons(ETH_P_IPV6):
11366                         fallthrough;
11367                 }
11368         }
11369         case IPPROTO_IPV6:
11370                 /* Check ext headers of inner IPv6 */
11371                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11372                                          NULL);
11373         }
11374         return false;
11375 }
11376
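/* .ndo_features_check handler.  Drop checksum and GSO offloads for any
 * tunneled packet whose headers the hardware cannot parse, as judged by
 * bnxt_tunl_check() and bnxt_exthdr_check() above.
 */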
11377 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11378                                              struct net_device *dev,
11379                                              netdev_features_t features)
11380 {
11381         struct bnxt *bp = netdev_priv(dev);
11382         u8 *l4_proto;
11383
11384         features = vlan_features_check(skb, features);
11385         switch (vlan_get_protocol(skb)) {
11386         case htons(ETH_P_IP):
11387                 if (!skb->encapsulation)
11388                         return features;
11389                 l4_proto = &ip_hdr(skb)->protocol;
11390                 if (bnxt_tunl_check(bp, skb, *l4_proto))
11391                         return features;
11392                 break;
11393         case htons(ETH_P_IPV6):
11394                 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11395                                        &l4_proto))
11396                         break;
11397                 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11398                         return features;
11399                 break;
11400         }
11401         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11402 }
11403
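/* Read num_words 32-bit words of chip register space through the
 * HWRM_DBG_READ_DIRECT command.  Firmware DMAs the raw values into a
 * host buffer, which is then byte-swapped into reg_buf.
 */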
11404 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11405                          u32 *reg_buf)
11406 {
11407         struct hwrm_dbg_read_direct_output *resp;
11408         struct hwrm_dbg_read_direct_input *req;
11409         __le32 *dbg_reg_buf;
11410         dma_addr_t mapping;
11411         int rc, i;
11412
11413         rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11414         if (rc)
11415                 return rc;
11416
11417         dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11418                                          &mapping);
11419         if (!dbg_reg_buf) {
11420                 rc = -ENOMEM;
11421                 goto dbg_rd_reg_exit;
11422         }
11423
11424         req->host_dest_addr = cpu_to_le64(mapping);
11425
11426         resp = hwrm_req_hold(bp, req);
11427         req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11428         req->read_len32 = cpu_to_le32(num_words);
11429
11430         rc = hwrm_req_send(bp, req);
11431         if (rc || resp->error_code) {
11432                 rc = -EIO;
11433                 goto dbg_rd_reg_exit;
11434         }
11435         for (i = 0; i < num_words; i++)
11436                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11437
11438 dbg_rd_reg_exit:
11439         hwrm_req_drop(bp, req);
11440         return rc;
11441 }
11442
11443 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11444                                        u32 ring_id, u32 *prod, u32 *cons)
11445 {
11446         struct hwrm_dbg_ring_info_get_output *resp;
11447         struct hwrm_dbg_ring_info_get_input *req;
11448         int rc;
11449
11450         rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11451         if (rc)
11452                 return rc;
11453
11454         req->ring_type = ring_type;
11455         req->fw_ring_id = cpu_to_le32(ring_id);
11456         resp = hwrm_req_hold(bp, req);
11457         rc = hwrm_req_send(bp, req);
11458         if (!rc) {
11459                 *prod = le32_to_cpu(resp->producer_index);
11460                 *cons = le32_to_cpu(resp->consumer_index);
11461         }
11462         hwrm_req_drop(bp, req);
11463         return rc;
11464 }
11465
11466 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11467 {
11468         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11469         int i = bnapi->index;
11470
11471         if (!txr)
11472                 return;
11473
11474         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11475                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11476                     txr->tx_cons);
11477 }
11478
11479 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11480 {
11481         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11482         int i = bnapi->index;
11483
11484         if (!rxr)
11485                 return;
11486
11487         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11488                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11489                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11490                     rxr->rx_sw_agg_prod);
11491 }
11492
11493 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11494 {
11495         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11496         int i = bnapi->index;
11497
11498         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11499                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11500 }
11501
11502 static void bnxt_dbg_dump_states(struct bnxt *bp)
11503 {
11504         int i;
11505         struct bnxt_napi *bnapi;
11506
11507         for (i = 0; i < bp->cp_nr_rings; i++) {
11508                 bnapi = bp->bnapi[i];
11509                 if (netif_msg_drv(bp)) {
11510                         bnxt_dump_tx_sw_state(bnapi);
11511                         bnxt_dump_rx_sw_state(bnapi);
11512                         bnxt_dump_cp_sw_state(bnapi);
11513                 }
11514         }
11515 }
11516
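/* Ask firmware to reset a single RX ring group, addressed through its
 * completion ring, so one ring can be recovered without a full reset.
 */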
11517 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11518 {
11519         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11520         struct hwrm_ring_reset_input *req;
11521         struct bnxt_napi *bnapi = rxr->bnapi;
11522         struct bnxt_cp_ring_info *cpr;
11523         u16 cp_ring_id;
11524         int rc;
11525
11526         rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11527         if (rc)
11528                 return rc;
11529
11530         cpr = &bnapi->cp_ring;
11531         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11532         req->cmpl_ring = cpu_to_le16(cp_ring_id);
11533         req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11534         req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11535         return hwrm_req_send_silent(bp, req);
11536 }
11537
11538 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11539 {
11540         if (!silent)
11541                 bnxt_dbg_dump_states(bp);
11542         if (netif_running(bp->dev)) {
11543                 int rc;
11544
11545                 if (silent) {
11546                         bnxt_close_nic(bp, false, false);
11547                         bnxt_open_nic(bp, false, false);
11548                 } else {
11549                         bnxt_ulp_stop(bp);
11550                         bnxt_close_nic(bp, true, false);
11551                         rc = bnxt_open_nic(bp, true, false);
11552                         bnxt_ulp_start(bp, rc);
11553                 }
11554         }
11555 }
11556
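/* .ndo_tx_timeout handler.  Just schedule the reset task; the actual
 * recovery happens in bnxt_sp_task() -> bnxt_reset().
 */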
11557 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11558 {
11559         struct bnxt *bp = netdev_priv(dev);
11560
11561         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11562         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11563         bnxt_queue_sp_work(bp);
11564 }
11565
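/* Called from bnxt_timer().  Sample the firmware heartbeat and reset
 * count registers; if the heartbeat stalls or the reset count changes
 * unexpectedly, schedule BNXT_FW_EXCEPTION_SP_EVENT to start recovery.
 */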
11566 static void bnxt_fw_health_check(struct bnxt *bp)
11567 {
11568         struct bnxt_fw_health *fw_health = bp->fw_health;
11569         u32 val;
11570
11571         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11572                 return;
11573
11574         /* Make sure it is enabled before checking the tmr_counter. */
11575         smp_rmb();
11576         if (fw_health->tmr_counter) {
11577                 fw_health->tmr_counter--;
11578                 return;
11579         }
11580
11581         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11582         if (val == fw_health->last_fw_heartbeat) {
11583                 fw_health->arrests++;
11584                 goto fw_reset;
11585         }
11586
11587         fw_health->last_fw_heartbeat = val;
11588
11589         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11590         if (val != fw_health->last_fw_reset_cnt) {
11591                 fw_health->discoveries++;
11592                 goto fw_reset;
11593         }
11594
11595         fw_health->tmr_counter = fw_health->tmr_multiplier;
11596         return;
11597
11598 fw_reset:
11599         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11600         bnxt_queue_sp_work(bp);
11601 }
11602
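/* Periodic per-device timer.  It does no real work itself; it only
 * sets sp_event bits and kicks bnxt_sp_task(), then re-arms itself at
 * bp->current_interval.
 */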
11603 static void bnxt_timer(struct timer_list *t)
11604 {
11605         struct bnxt *bp = from_timer(bp, t, timer);
11606         struct net_device *dev = bp->dev;
11607
11608         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11609                 return;
11610
11611         if (atomic_read(&bp->intr_sem) != 0)
11612                 goto bnxt_restart_timer;
11613
11614         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11615                 bnxt_fw_health_check(bp);
11616
11617         if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
11618                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11619                 bnxt_queue_sp_work(bp);
11620         }
11621
11622         if (bnxt_tc_flower_enabled(bp)) {
11623                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11624                 bnxt_queue_sp_work(bp);
11625         }
11626
11627 #ifdef CONFIG_RFS_ACCEL
11628         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11629                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11630                 bnxt_queue_sp_work(bp);
11631         }
11632 #endif /*CONFIG_RFS_ACCEL*/
11633
11634         if (bp->link_info.phy_retry) {
11635                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11636                         bp->link_info.phy_retry = false;
11637                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11638                 } else {
11639                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11640                         bnxt_queue_sp_work(bp);
11641                 }
11642         }
11643
11644         if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
11645                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11646                 bnxt_queue_sp_work(bp);
11647         }
11648
11649         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11650             netif_carrier_ok(dev)) {
11651                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11652                 bnxt_queue_sp_work(bp);
11653         }
11654 bnxt_restart_timer:
11655         mod_timer(&bp->timer, jiffies + bp->current_interval);
11656 }
11657
11658 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11659 {
11660         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11661          * set.  If the device is being closed, bnxt_close() may be holding
11662          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So
11663          * we must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
11664          */
11665         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11666         rtnl_lock();
11667 }
11668
11669 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11670 {
11671         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11672         rtnl_unlock();
11673 }
11674
11675 /* Only called from bnxt_sp_task() */
11676 static void bnxt_reset(struct bnxt *bp, bool silent)
11677 {
11678         bnxt_rtnl_lock_sp(bp);
11679         if (test_bit(BNXT_STATE_OPEN, &bp->state))
11680                 bnxt_reset_task(bp, silent);
11681         bnxt_rtnl_unlock_sp(bp);
11682 }
11683
11684 /* Only called from bnxt_sp_task() */
11685 static void bnxt_rx_ring_reset(struct bnxt *bp)
11686 {
11687         int i;
11688
11689         bnxt_rtnl_lock_sp(bp);
11690         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11691                 bnxt_rtnl_unlock_sp(bp);
11692                 return;
11693         }
11694         /* Disable and flush TPA before resetting the RX ring */
11695         if (bp->flags & BNXT_FLAG_TPA)
11696                 bnxt_set_tpa(bp, false);
11697         for (i = 0; i < bp->rx_nr_rings; i++) {
11698                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11699                 struct bnxt_cp_ring_info *cpr;
11700                 int rc;
11701
11702                 if (!rxr->bnapi->in_reset)
11703                         continue;
11704
11705                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11706                 if (rc) {
11707                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
11708                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11709                         else
11710                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11711                                             rc);
11712                         bnxt_reset_task(bp, true);
11713                         break;
11714                 }
11715                 bnxt_free_one_rx_ring_skbs(bp, i);
11716                 rxr->rx_prod = 0;
11717                 rxr->rx_agg_prod = 0;
11718                 rxr->rx_sw_agg_prod = 0;
11719                 rxr->rx_next_cons = 0;
11720                 rxr->bnapi->in_reset = false;
11721                 bnxt_alloc_one_rx_ring(bp, i);
11722                 cpr = &rxr->bnapi->cp_ring;
11723                 cpr->sw_stats.rx.rx_resets++;
11724                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11725                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11726                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11727         }
11728         if (bp->flags & BNXT_FLAG_TPA)
11729                 bnxt_set_tpa(bp, true);
11730         bnxt_rtnl_unlock_sp(bp);
11731 }
11732
11733 static void bnxt_fw_reset_close(struct bnxt *bp)
11734 {
11735         bnxt_ulp_stop(bp);
11736         /* When firmware is in fatal state, quiesce device and disable
11737          * bus master to prevent any potential bad DMAs before freeing
11738          * kernel memory.
11739          */
11740         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11741                 u16 val = 0;
11742
11743                 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11744                 if (val == 0xffff)
11745                         bp->fw_reset_min_dsecs = 0;
11746                 bnxt_tx_disable(bp);
11747                 bnxt_disable_napi(bp);
11748                 bnxt_disable_int_sync(bp);
11749                 bnxt_free_irq(bp);
11750                 bnxt_clear_int_mode(bp);
11751                 pci_disable_device(bp->pdev);
11752         }
11753         __bnxt_close_nic(bp, true, false);
11754         bnxt_vf_reps_free(bp);
11755         bnxt_clear_int_mode(bp);
11756         bnxt_hwrm_func_drv_unrgtr(bp);
11757         if (pci_is_enabled(bp->pdev))
11758                 pci_disable_device(bp->pdev);
11759         bnxt_free_ctx_mem(bp);
11760         kfree(bp->ctx);
11761         bp->ctx = NULL;
11762 }
11763
11764 static bool is_bnxt_fw_ok(struct bnxt *bp)
11765 {
11766         struct bnxt_fw_health *fw_health = bp->fw_health;
11767         bool no_heartbeat = false, has_reset = false;
11768         u32 val;
11769
11770         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11771         if (val == fw_health->last_fw_heartbeat)
11772                 no_heartbeat = true;
11773
11774         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11775         if (val != fw_health->last_fw_reset_cnt)
11776                 has_reset = true;
11777
11778         if (!no_heartbeat && has_reset)
11779                 return true;
11780
11781         return false;
11782 }
11783
11784 /* rtnl_lock is acquired before calling this function */
11785 static void bnxt_force_fw_reset(struct bnxt *bp)
11786 {
11787         struct bnxt_fw_health *fw_health = bp->fw_health;
11788         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11789         u32 wait_dsecs;
11790
11791         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11792             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11793                 return;
11794
11795         if (ptp) {
11796                 spin_lock_bh(&ptp->ptp_lock);
11797                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11798                 spin_unlock_bh(&ptp->ptp_lock);
11799         } else {
11800                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11801         }
11802         bnxt_fw_reset_close(bp);
11803         wait_dsecs = fw_health->master_func_wait_dsecs;
11804         if (fw_health->primary) {
11805                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11806                         wait_dsecs = 0;
11807                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11808         } else {
11809                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11810                 wait_dsecs = fw_health->normal_func_wait_dsecs;
11811                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11812         }
11813
11814         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11815         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11816         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11817 }
11818
11819 void bnxt_fw_exception(struct bnxt *bp)
11820 {
11821         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11822         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11823         bnxt_rtnl_lock_sp(bp);
11824         bnxt_force_fw_reset(bp);
11825         bnxt_rtnl_unlock_sp(bp);
11826 }
11827
11828 /* Returns the number of registered VFs, 1 if VF configuration is still
11829  * pending, 0 if there are no VFs, or < 0 on error.
11830  */
11831 static int bnxt_get_registered_vfs(struct bnxt *bp)
11832 {
11833 #ifdef CONFIG_BNXT_SRIOV
11834         int rc;
11835
11836         if (!BNXT_PF(bp))
11837                 return 0;
11838
11839         rc = bnxt_hwrm_func_qcfg(bp);
11840         if (rc) {
11841                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11842                 return rc;
11843         }
11844         if (bp->pf.registered_vfs)
11845                 return bp->pf.registered_vfs;
11846         if (bp->sriov_cfg)
11847                 return 1;
11848 #endif
11849         return 0;
11850 }
11851
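/* Begin a coordinated (non-fatal) firmware reset.  On a PF with active
 * VFs, first poll in BNXT_FW_RESET_STATE_POLL_VF until the VF drivers
 * unregister, then close the device and hand off to bnxt_fw_reset_task().
 */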
11852 void bnxt_fw_reset(struct bnxt *bp)
11853 {
11854         bnxt_rtnl_lock_sp(bp);
11855         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11856             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11857                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11858                 int n = 0, tmo;
11859
11860                 if (ptp) {
11861                         spin_lock_bh(&ptp->ptp_lock);
11862                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11863                         spin_unlock_bh(&ptp->ptp_lock);
11864                 } else {
11865                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11866                 }
11867                 if (bp->pf.active_vfs &&
11868                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11869                         n = bnxt_get_registered_vfs(bp);
11870                 if (n < 0) {
11871                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11872                                    n);
11873                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11874                         dev_close(bp->dev);
11875                         goto fw_reset_exit;
11876                 } else if (n > 0) {
11877                         u16 vf_tmo_dsecs = n * 10;
11878
11879                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11880                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11881                         bp->fw_reset_state =
11882                                 BNXT_FW_RESET_STATE_POLL_VF;
11883                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11884                         goto fw_reset_exit;
11885                 }
11886                 bnxt_fw_reset_close(bp);
11887                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11888                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11889                         tmo = HZ / 10;
11890                 } else {
11891                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11892                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11893                 }
11894                 bnxt_queue_fw_reset_work(bp, tmo);
11895         }
11896 fw_reset_exit:
11897         bnxt_rtnl_unlock_sp(bp);
11898 }
11899
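/* P5 chips only: look for completion rings that have work pending but
 * whose consumer index has not moved since the last check, query the
 * firmware ring state for debug, and count them as missed IRQs.
 */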
11900 static void bnxt_chk_missed_irq(struct bnxt *bp)
11901 {
11902         int i;
11903
11904         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11905                 return;
11906
11907         for (i = 0; i < bp->cp_nr_rings; i++) {
11908                 struct bnxt_napi *bnapi = bp->bnapi[i];
11909                 struct bnxt_cp_ring_info *cpr;
11910                 u32 fw_ring_id;
11911                 int j;
11912
11913                 if (!bnapi)
11914                         continue;
11915
11916                 cpr = &bnapi->cp_ring;
11917                 for (j = 0; j < 2; j++) {
11918                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11919                         u32 val[2];
11920
11921                         if (!cpr2 || cpr2->has_more_work ||
11922                             !bnxt_has_work(bp, cpr2))
11923                                 continue;
11924
11925                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11926                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11927                                 continue;
11928                         }
11929                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11930                         bnxt_dbg_hwrm_ring_info_get(bp,
11931                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11932                                 fw_ring_id, &val[0], &val[1]);
11933                         cpr->sw_stats.cmn.missed_irqs++;
11934                 }
11935         }
11936 }
11937
11938 static void bnxt_cfg_ntp_filters(struct bnxt *);
11939
11940 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11941 {
11942         struct bnxt_link_info *link_info = &bp->link_info;
11943
11944         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11945                 link_info->autoneg = BNXT_AUTONEG_SPEED;
11946                 if (bp->hwrm_spec_code >= 0x10201) {
11947                         if (link_info->auto_pause_setting &
11948                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11949                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11950                 } else {
11951                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11952                 }
11953                 link_info->advertising = link_info->auto_link_speeds;
11954                 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11955         } else {
11956                 link_info->req_link_speed = link_info->force_link_speed;
11957                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11958                 if (link_info->force_pam4_link_speed) {
11959                         link_info->req_link_speed =
11960                                 link_info->force_pam4_link_speed;
11961                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11962                 }
11963                 link_info->req_duplex = link_info->duplex_setting;
11964         }
11965         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11966                 link_info->req_flow_ctrl =
11967                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11968         else
11969                 link_info->req_flow_ctrl = link_info->force_pause_setting;
11970 }
11971
11972 static void bnxt_fw_echo_reply(struct bnxt *bp)
11973 {
11974         struct bnxt_fw_health *fw_health = bp->fw_health;
11975         struct hwrm_func_echo_response_input *req;
11976         int rc;
11977
11978         rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11979         if (rc)
11980                 return;
11981         req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11982         req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11983         hwrm_req_send(bp, req);
11984 }
11985
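/* Deferred slow-path worker.  BNXT_STATE_IN_SP_TASK brackets the whole
 * run so that bnxt_close() can synchronize against it; the reset
 * handlers at the end drop and re-take that bit around rtnl_lock.
 */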
11986 static void bnxt_sp_task(struct work_struct *work)
11987 {
11988         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11989
11990         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11991         smp_mb__after_atomic();
11992         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11993                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11994                 return;
11995         }
11996
11997         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11998                 bnxt_cfg_rx_mode(bp);
11999
12000         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
12001                 bnxt_cfg_ntp_filters(bp);
12002         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
12003                 bnxt_hwrm_exec_fwd_req(bp);
12004         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
12005                 bnxt_hwrm_port_qstats(bp, 0);
12006                 bnxt_hwrm_port_qstats_ext(bp, 0);
12007                 bnxt_accumulate_all_stats(bp);
12008         }
12009
12010         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
12011                 int rc;
12012
12013                 mutex_lock(&bp->link_lock);
12014                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
12015                                        &bp->sp_event))
12016                         bnxt_hwrm_phy_qcaps(bp);
12017
12018                 rc = bnxt_update_link(bp, true);
12019                 if (rc)
12020                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
12021                                    rc);
12022
12023                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
12024                                        &bp->sp_event))
12025                         bnxt_init_ethtool_link_settings(bp);
12026                 mutex_unlock(&bp->link_lock);
12027         }
12028         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
12029                 int rc;
12030
12031                 mutex_lock(&bp->link_lock);
12032                 rc = bnxt_update_phy_setting(bp);
12033                 mutex_unlock(&bp->link_lock);
12034                 if (rc) {
12035                         netdev_warn(bp->dev, "update phy settings retry failed\n");
12036                 } else {
12037                         bp->link_info.phy_retry = false;
12038                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
12039                 }
12040         }
12041         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
12042                 mutex_lock(&bp->link_lock);
12043                 bnxt_get_port_module_status(bp);
12044                 mutex_unlock(&bp->link_lock);
12045         }
12046
12047         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
12048                 bnxt_tc_flow_stats_work(bp);
12049
12050         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
12051                 bnxt_chk_missed_irq(bp);
12052
12053         if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
12054                 bnxt_fw_echo_reply(bp);
12055
12056         /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
12057          * must be the last functions to be called before exiting.
12058          */
12059         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
12060                 bnxt_reset(bp, false);
12061
12062         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
12063                 bnxt_reset(bp, true);
12064
12065         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
12066                 bnxt_rx_ring_reset(bp);
12067
12068         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
12069                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
12070                     test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
12071                         bnxt_devlink_health_fw_report(bp);
12072                 else
12073                         bnxt_fw_reset(bp);
12074         }
12075
12076         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
12077                 if (!is_bnxt_fw_ok(bp))
12078                         bnxt_devlink_health_fw_report(bp);
12079         }
12080
12081         smp_mb__before_atomic();
12082         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12083 }
12084
12085 /* Under rtnl_lock */
12086 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
12087                      int tx_xdp)
12088 {
12089         int max_rx, max_tx, tx_sets = 1;
12090         int tx_rings_needed, stats;
12091         int rx_rings = rx;
12092         int cp, vnics, rc;
12093
12094         if (tcs)
12095                 tx_sets = tcs;
12096
12097         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
12098         if (rc)
12099                 return rc;
12100
12101         if (max_rx < rx)
12102                 return -ENOMEM;
12103
12104         tx_rings_needed = tx * tx_sets + tx_xdp;
12105         if (max_tx < tx_rings_needed)
12106                 return -ENOMEM;
12107
12108         vnics = 1;
12109         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
12110                 vnics += rx_rings;
12111
12112         if (bp->flags & BNXT_FLAG_AGG_RINGS)
12113                 rx_rings <<= 1;
12114         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
12115         stats = cp;
12116         if (BNXT_NEW_RM(bp)) {
12117                 cp += bnxt_get_ulp_msix_num(bp);
12118                 stats += bnxt_get_ulp_stat_ctxs(bp);
12119         }
12120         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
12121                                      stats, vnics);
12122 }
12123
12124 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
12125 {
12126         if (bp->bar2) {
12127                 pci_iounmap(pdev, bp->bar2);
12128                 bp->bar2 = NULL;
12129         }
12130
12131         if (bp->bar1) {
12132                 pci_iounmap(pdev, bp->bar1);
12133                 bp->bar1 = NULL;
12134         }
12135
12136         if (bp->bar0) {
12137                 pci_iounmap(pdev, bp->bar0);
12138                 bp->bar0 = NULL;
12139         }
12140 }
12141
12142 static void bnxt_cleanup_pci(struct bnxt *bp)
12143 {
12144         bnxt_unmap_bars(bp, bp->pdev);
12145         pci_release_regions(bp->pdev);
12146         if (pci_is_enabled(bp->pdev))
12147                 pci_disable_device(bp->pdev);
12148 }
12149
12150 static void bnxt_init_dflt_coal(struct bnxt *bp)
12151 {
12152         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
12153         struct bnxt_coal *coal;
12154         u16 flags = 0;
12155
12156         if (coal_cap->cmpl_params &
12157             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
12158                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
12159
12160         /* Tick values in microseconds.
12161          * 1 coal_buf x bufs_per_record = 1 completion record.
12162          */
12163         coal = &bp->rx_coal;
12164         coal->coal_ticks = 10;
12165         coal->coal_bufs = 30;
12166         coal->coal_ticks_irq = 1;
12167         coal->coal_bufs_irq = 2;
12168         coal->idle_thresh = 50;
12169         coal->bufs_per_record = 2;
12170         coal->budget = 64;              /* NAPI budget */
12171         coal->flags = flags;
12172
12173         coal = &bp->tx_coal;
12174         coal->coal_ticks = 28;
12175         coal->coal_bufs = 30;
12176         coal->coal_ticks_irq = 2;
12177         coal->coal_bufs_irq = 2;
12178         coal->bufs_per_record = 1;
12179         coal->flags = flags;
12180
12181         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12182 }
12183
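/* Phase 1 of firmware init: get the HWRM version (retrying once after
 * attempting firmware recovery), read the NVM config version, reset the
 * function, and sync the firmware clock.
 */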
12184 static int bnxt_fw_init_one_p1(struct bnxt *bp)
12185 {
12186         int rc;
12187
12188         bp->fw_cap = 0;
12189         rc = bnxt_hwrm_ver_get(bp);
12190         bnxt_try_map_fw_health_reg(bp);
12191         if (rc) {
12192                 rc = bnxt_try_recover_fw(bp);
12193                 if (rc)
12194                         return rc;
12195                 rc = bnxt_hwrm_ver_get(bp);
12196                 if (rc)
12197                         return rc;
12198         }
12199
12200         bnxt_nvm_cfg_ver_get(bp);
12201
12202         rc = bnxt_hwrm_func_reset(bp);
12203         if (rc)
12204                 return -ENODEV;
12205
12206         bnxt_hwrm_fw_set_time(bp);
12207         return 0;
12208 }
12209
12210 static int bnxt_fw_init_one_p2(struct bnxt *bp)
12211 {
12212         int rc;
12213
12214         /* Get the MAX capabilities for this function */
12215         rc = bnxt_hwrm_func_qcaps(bp);
12216         if (rc) {
12217                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12218                            rc);
12219                 return -ENODEV;
12220         }
12221
12222         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12223         if (rc)
12224                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12225                             rc);
12226
12227         if (bnxt_alloc_fw_health(bp)) {
12228                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12229         } else {
12230                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
12231                 if (rc)
12232                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12233                                     rc);
12234         }
12235
12236         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
12237         if (rc)
12238                 return -ENODEV;
12239
12240         bnxt_hwrm_func_qcfg(bp);
12241         bnxt_hwrm_vnic_qcaps(bp);
12242         bnxt_hwrm_port_led_qcaps(bp);
12243         bnxt_ethtool_init(bp);
12244         bnxt_dcb_init(bp);
12245         return 0;
12246 }
12247
12248 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12249 {
12250         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12251         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12252                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
12253                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
12254                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
12255         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
12256                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12257                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12258                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
12259         }
12260 }
12261
12262 static void bnxt_set_dflt_rfs(struct bnxt *bp)
12263 {
12264         struct net_device *dev = bp->dev;
12265
12266         dev->hw_features &= ~NETIF_F_NTUPLE;
12267         dev->features &= ~NETIF_F_NTUPLE;
12268         bp->flags &= ~BNXT_FLAG_RFS;
12269         if (bnxt_rfs_supported(bp)) {
12270                 dev->hw_features |= NETIF_F_NTUPLE;
12271                 if (bnxt_rfs_capable(bp)) {
12272                         bp->flags |= BNXT_FLAG_RFS;
12273                         dev->features |= NETIF_F_NTUPLE;
12274                 }
12275         }
12276 }
12277
12278 static void bnxt_fw_init_one_p3(struct bnxt *bp)
12279 {
12280         struct pci_dev *pdev = bp->pdev;
12281
12282         bnxt_set_dflt_rss_hash_type(bp);
12283         bnxt_set_dflt_rfs(bp);
12284
12285         bnxt_get_wol_settings(bp);
12286         if (bp->flags & BNXT_FLAG_WOL_CAP)
12287                 device_set_wakeup_enable(&pdev->dev, bp->wol);
12288         else
12289                 device_set_wakeup_capable(&pdev->dev, false);
12290
12291         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12292         bnxt_hwrm_coal_params_qcaps(bp);
12293 }
12294
12295 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12296
12297 int bnxt_fw_init_one(struct bnxt *bp)
12298 {
12299         int rc;
12300
12301         rc = bnxt_fw_init_one_p1(bp);
12302         if (rc) {
12303                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12304                 return rc;
12305         }
12306         rc = bnxt_fw_init_one_p2(bp);
12307         if (rc) {
12308                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12309                 return rc;
12310         }
12311         rc = bnxt_probe_phy(bp, false);
12312         if (rc)
12313                 return rc;
12314         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12315         if (rc)
12316                 return rc;
12317
12318         bnxt_fw_init_one_p3(bp);
12319         return 0;
12320 }
12321
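/* Execute one step of the firmware-provided host reset sequence.  The
 * register type selects PCI config space, a GRC window, or a direct BAR
 * write; the config space read before msleep() presumably flushes the
 * posted write before the delay.
 */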
12322 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12323 {
12324         struct bnxt_fw_health *fw_health = bp->fw_health;
12325         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12326         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12327         u32 reg_type, reg_off, delay_msecs;
12328
12329         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12330         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12331         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12332         switch (reg_type) {
12333         case BNXT_FW_HEALTH_REG_TYPE_CFG:
12334                 pci_write_config_dword(bp->pdev, reg_off, val);
12335                 break;
12336         case BNXT_FW_HEALTH_REG_TYPE_GRC:
12337                 writel(reg_off & BNXT_GRC_BASE_MASK,
12338                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12339                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12340                 fallthrough;
12341         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12342                 writel(val, bp->bar0 + reg_off);
12343                 break;
12344         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12345                 writel(val, bp->bar1 + reg_off);
12346                 break;
12347         }
12348         if (delay_msecs) {
12349                 pci_read_config_dword(bp->pdev, 0, &val);
12350                 msleep(delay_msecs);
12351         }
12352 }
12353
12354 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12355 {
12356         struct hwrm_func_qcfg_output *resp;
12357         struct hwrm_func_qcfg_input *req;
12358         bool result = true; /* firmware will enforce if unknown */
12359
12360         if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12361                 return result;
12362
12363         if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12364                 return result;
12365
12366         req->fid = cpu_to_le16(0xffff);
12367         resp = hwrm_req_hold(bp, req);
12368         if (!hwrm_req_send(bp, req))
12369                 result = !!(le16_to_cpu(resp->flags) &
12370                             FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
12371         hwrm_req_drop(bp, req);
12372         return result;
12373 }
12374
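/* Perform the actual firmware reset as the primary function: via
 * OP-TEE when ERR_RECOVER_RELOAD is supported, by replaying the host
 * register write sequence, or with an HWRM_FW_RESET to the co-processor.
 */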
12375 static void bnxt_reset_all(struct bnxt *bp)
12376 {
12377         struct bnxt_fw_health *fw_health = bp->fw_health;
12378         int i, rc;
12379
12380         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12381                 bnxt_fw_reset_via_optee(bp);
12382                 bp->fw_reset_timestamp = jiffies;
12383                 return;
12384         }
12385
12386         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12387                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12388                         bnxt_fw_reset_writel(bp, i);
12389         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12390                 struct hwrm_fw_reset_input *req;
12391
12392                 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12393                 if (!rc) {
12394                         req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12395                         req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12396                         req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12397                         req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12398                         rc = hwrm_req_send(bp, req);
12399                 }
12400                 if (rc != -ENODEV)
12401                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12402         }
12403         bp->fw_reset_timestamp = jiffies;
12404 }
12405
12406 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12407 {
12408         return time_after(jiffies, bp->fw_reset_timestamp +
12409                           (bp->fw_reset_max_dsecs * HZ / 10));
12410 }
12411
12412 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12413 {
12414         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12415         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12416                 bnxt_ulp_start(bp, rc);
12417                 bnxt_dl_health_fw_status_update(bp, false);
12418         }
12419         bp->fw_reset_state = 0;
12420         dev_close(bp->dev);
12421 }
12422
12423 static void bnxt_fw_reset_task(struct work_struct *work)
12424 {
12425         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12426         int rc = 0;
12427
12428         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12429                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12430                 return;
12431         }
12432
12433         switch (bp->fw_reset_state) {
12434         case BNXT_FW_RESET_STATE_POLL_VF: {
12435                 int n = bnxt_get_registered_vfs(bp);
12436                 int tmo;
12437
12438                 if (n < 0) {
12439                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12440                                    n, jiffies_to_msecs(jiffies -
12441                                    bp->fw_reset_timestamp));
12442                         goto fw_reset_abort;
12443                 } else if (n > 0) {
12444                         if (bnxt_fw_reset_timeout(bp)) {
12445                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12446                                 bp->fw_reset_state = 0;
12447                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12448                                            n);
12449                                 return;
12450                         }
12451                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12452                         return;
12453                 }
12454                 bp->fw_reset_timestamp = jiffies;
12455                 rtnl_lock();
12456                 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12457                         bnxt_fw_reset_abort(bp, rc);
12458                         rtnl_unlock();
12459                         return;
12460                 }
12461                 bnxt_fw_reset_close(bp);
12462                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12463                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12464                         tmo = HZ / 10;
12465                 } else {
12466                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12467                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
12468                 }
12469                 rtnl_unlock();
12470                 bnxt_queue_fw_reset_work(bp, tmo);
12471                 return;
12472         }
12473         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12474                 u32 val;
12475
12476                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12477                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12478                     !bnxt_fw_reset_timeout(bp)) {
12479                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12480                         return;
12481                 }
12482
12483                 if (!bp->fw_health->primary) {
12484                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12485
12486                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12487                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12488                         return;
12489                 }
12490                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12491         }
12492                 fallthrough;
12493         case BNXT_FW_RESET_STATE_RESET_FW:
12494                 bnxt_reset_all(bp);
12495                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12496                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12497                 return;
12498         case BNXT_FW_RESET_STATE_ENABLE_DEV:
12499                 bnxt_inv_fw_health_reg(bp);
12500                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12501                     !bp->fw_reset_min_dsecs) {
12502                         u16 val;
12503
12504                         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12505                         if (val == 0xffff) {
12506                                 if (bnxt_fw_reset_timeout(bp)) {
12507                                         netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12508                                         rc = -ETIMEDOUT;
12509                                         goto fw_reset_abort;
12510                                 }
12511                                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12512                                 return;
12513                         }
12514                 }
12515                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12516                 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
12517                 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12518                     !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12519                         bnxt_dl_remote_reload(bp);
12520                 if (pci_enable_device(bp->pdev)) {
12521                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12522                         rc = -ENODEV;
12523                         goto fw_reset_abort;
12524                 }
12525                 pci_set_master(bp->pdev);
12526                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12527                 fallthrough;
12528         case BNXT_FW_RESET_STATE_POLL_FW:
12529                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12530                 rc = bnxt_hwrm_poll(bp);
12531                 if (rc) {
12532                         if (bnxt_fw_reset_timeout(bp)) {
12533                                 netdev_err(bp->dev, "Firmware reset aborted\n");
12534                                 goto fw_reset_abort_status;
12535                         }
12536                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12537                         return;
12538                 }
12539                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12540                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12541                 fallthrough;
12542         case BNXT_FW_RESET_STATE_OPENING:
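                /* Cannot block on the RTNL lock in this work item; if the
                 * lock is contended, bail out and retry from the workqueue
                 * in 100 ms.
                 */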
12543                 while (!rtnl_trylock()) {
12544                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12545                         return;
12546                 }
12547                 rc = bnxt_open(bp->dev);
12548                 if (rc) {
12549                         netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12550                         bnxt_fw_reset_abort(bp, rc);
12551                         rtnl_unlock();
12552                         return;
12553                 }
12554
12555                 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12556                     bp->fw_health->enabled) {
12557                         bp->fw_health->last_fw_reset_cnt =
12558                                 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12559                 }
12560                 bp->fw_reset_state = 0;
12561                 /* Make sure fw_reset_state is 0 before clearing the flag */
12562                 smp_mb__before_atomic();
12563                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12564                 bnxt_ulp_start(bp, 0);
12565                 bnxt_reenable_sriov(bp);
12566                 bnxt_vf_reps_alloc(bp);
12567                 bnxt_vf_reps_open(bp);
12568                 bnxt_ptp_reapply_pps(bp);
12569                 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
12570                 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12571                         bnxt_dl_health_fw_recovery_done(bp);
12572                         bnxt_dl_health_fw_status_update(bp, true);
12573                 }
12574                 rtnl_unlock();
12575                 break;
12576         }
12577         return;
12578
12579 fw_reset_abort_status:
12580         if (bp->fw_health->status_reliable ||
12581             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12582                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12583
12584                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12585         }
12586 fw_reset_abort:
12587         rtnl_lock();
12588         bnxt_fw_reset_abort(bp, rc);
12589         rtnl_unlock();
12590 }
12591
12592 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12593 {
12594         int rc;
12595         struct bnxt *bp = netdev_priv(dev);
12596
12597         SET_NETDEV_DEV(dev, &pdev->dev);
12598
12599         /* enable device (incl. PCI PM wakeup) and bus-mastering */
12600         rc = pci_enable_device(pdev);
12601         if (rc) {
12602                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12603                 goto init_err;
12604         }
12605
12606         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12607                 dev_err(&pdev->dev,
12608                         "Cannot find PCI device base address, aborting\n");
12609                 rc = -ENODEV;
12610                 goto init_err_disable;
12611         }
12612
12613         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12614         if (rc) {
12615                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12616                 goto init_err_disable;
12617         }
12618
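        /* Prefer 64-bit DMA addressing; fall back to a 32-bit mask only if
         * the platform cannot support the larger mask.
         */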
12619         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12620             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12621                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12622                 rc = -EIO;
12623                 goto init_err_release;
12624         }
12625
12626         pci_set_master(pdev);
12627
12628         bp->dev = dev;
12629         bp->pdev = pdev;
12630
12631         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12632          * determines the BAR size.
12633          */
12634         bp->bar0 = pci_ioremap_bar(pdev, 0);
12635         if (!bp->bar0) {
12636                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12637                 rc = -ENOMEM;
12638                 goto init_err_release;
12639         }
12640
12641         bp->bar2 = pci_ioremap_bar(pdev, 4);
12642         if (!bp->bar2) {
12643                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12644                 rc = -ENOMEM;
12645                 goto init_err_release;
12646         }
12647
12648         pci_enable_pcie_error_reporting(pdev);
12649
12650         INIT_WORK(&bp->sp_task, bnxt_sp_task);
12651         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12652
12653         spin_lock_init(&bp->ntp_fltr_lock);
12654 #if BITS_PER_LONG == 32
12655         spin_lock_init(&bp->db_lock);
12656 #endif
12657
12658         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12659         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12660
12661         timer_setup(&bp->timer, bnxt_timer, 0);
12662         bp->current_interval = BNXT_TIMER_INTERVAL;
12663
12664         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12665         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12666
12667         clear_bit(BNXT_STATE_OPEN, &bp->state);
12668         return 0;
12669
12670 init_err_release:
12671         bnxt_unmap_bars(bp, pdev);
12672         pci_release_regions(pdev);
12673
12674 init_err_disable:
12675         pci_disable_device(pdev);
12676
12677 init_err:
12678         return rc;
12679 }
12680
12681 /* rtnl_lock held */
12682 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12683 {
12684         struct sockaddr *addr = p;
12685         struct bnxt *bp = netdev_priv(dev);
12686         int rc = 0;
12687
12688         if (!is_valid_ether_addr(addr->sa_data))
12689                 return -EADDRNOTAVAIL;
12690
12691         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12692                 return 0;
12693
12694         rc = bnxt_approve_mac(bp, addr->sa_data, true);
12695         if (rc)
12696                 return rc;
12697
12698         eth_hw_addr_set(dev, addr->sa_data);
12699         if (netif_running(dev)) {
12700                 bnxt_close_nic(bp, false, false);
12701                 rc = bnxt_open_nic(bp, false, false);
12702         }
12703
12704         return rc;
12705 }
12706
12707 /* rtnl_lock held */
12708 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12709 {
12710         struct bnxt *bp = netdev_priv(dev);
12711
12712         if (netif_running(dev))
12713                 bnxt_close_nic(bp, true, false);
12714
12715         dev->mtu = new_mtu;
12716         bnxt_set_ring_params(bp);
12717
12718         if (netif_running(dev))
12719                 return bnxt_open_nic(bp, true, false);
12720
12721         return 0;
12722 }
12723
12724 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12725 {
12726         struct bnxt *bp = netdev_priv(dev);
12727         bool sh = false;
12728         int rc;
12729
12730         if (tc > bp->max_tc) {
12731                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12732                            tc, bp->max_tc);
12733                 return -EINVAL;
12734         }
12735
12736         if (netdev_get_num_tc(dev) == tc)
12737                 return 0;
12738
12739         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12740                 sh = true;
12741
12742         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12743                               sh, tc, bp->tx_nr_rings_xdp);
12744         if (rc)
12745                 return rc;
12746
12747         /* Need to close the device and redo HW resource allocations */
12748         if (netif_running(bp->dev))
12749                 bnxt_close_nic(bp, true, false);
12750
12751         if (tc) {
12752                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12753                 netdev_set_num_tc(dev, tc);
12754         } else {
12755                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12756                 netdev_reset_tc(dev);
12757         }
12758         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12759         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12760                                bp->tx_nr_rings + bp->rx_nr_rings;
12761
12762         if (netif_running(bp->dev))
12763                 return bnxt_open_nic(bp, true, false);
12764
12765         return 0;
12766 }
12767
12768 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12769                                   void *cb_priv)
12770 {
12771         struct bnxt *bp = cb_priv;
12772
12773         if (!bnxt_tc_flower_enabled(bp) ||
12774             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12775                 return -EOPNOTSUPP;
12776
12777         switch (type) {
12778         case TC_SETUP_CLSFLOWER:
12779                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12780         default:
12781                 return -EOPNOTSUPP;
12782         }
12783 }
12784
12785 LIST_HEAD(bnxt_block_cb_list);
12786
12787 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12788                          void *type_data)
12789 {
12790         struct bnxt *bp = netdev_priv(dev);
12791
12792         switch (type) {
12793         case TC_SETUP_BLOCK:
12794                 return flow_block_cb_setup_simple(type_data,
12795                                                   &bnxt_block_cb_list,
12796                                                   bnxt_setup_tc_block_cb,
12797                                                   bp, bp, true);
12798         case TC_SETUP_QDISC_MQPRIO: {
12799                 struct tc_mqprio_qopt *mqprio = type_data;
12800
12801                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12802
12803                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12804         }
12805         default:
12806                 return -EOPNOTSUPP;
12807         }
12808 }
12809
12810 #ifdef CONFIG_RFS_ACCEL
12811 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12812                             struct bnxt_ntuple_filter *f2)
12813 {
12814         struct flow_keys *keys1 = &f1->fkeys;
12815         struct flow_keys *keys2 = &f2->fkeys;
12816
12817         if (keys1->basic.n_proto != keys2->basic.n_proto ||
12818             keys1->basic.ip_proto != keys2->basic.ip_proto)
12819                 return false;
12820
12821         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12822                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12823                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12824                         return false;
12825         } else {
12826                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12827                            sizeof(keys1->addrs.v6addrs.src)) ||
12828                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12829                            sizeof(keys1->addrs.v6addrs.dst)))
12830                         return false;
12831         }
12832
12833         if (keys1->ports.ports == keys2->ports.ports &&
12834             keys1->control.flags == keys2->control.flags &&
12835             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12836             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12837                 return true;
12838
12839         return false;
12840 }
12841
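/* .ndo_rx_flow_steer handler for accelerated RFS.  Dissects the flow keys
 * from the skb, looks for an existing matching ntuple filter, and otherwise
 * queues a new filter for bnxt_cfg_ntp_filters() to program into hardware.
 * Returns the filter's sw_id on success or a negative errno.
 */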
12842 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12843                               u16 rxq_index, u32 flow_id)
12844 {
12845         struct bnxt *bp = netdev_priv(dev);
12846         struct bnxt_ntuple_filter *fltr, *new_fltr;
12847         struct flow_keys *fkeys;
12848         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12849         int rc = 0, idx, bit_id, l2_idx = 0;
12850         struct hlist_head *head;
12851         u32 flags;
12852
12853         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12854                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12855                 int off = 0, j;
12856
12857                 netif_addr_lock_bh(dev);
12858                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12859                         if (ether_addr_equal(eth->h_dest,
12860                                              vnic->uc_list + off)) {
12861                                 l2_idx = j + 1;
12862                                 break;
12863                         }
12864                 }
12865                 netif_addr_unlock_bh(dev);
12866                 if (!l2_idx)
12867                         return -EINVAL;
12868         }
12869         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12870         if (!new_fltr)
12871                 return -ENOMEM;
12872
12873         fkeys = &new_fltr->fkeys;
12874         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12875                 rc = -EPROTONOSUPPORT;
12876                 goto err_free;
12877         }
12878
12879         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12880              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12881             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12882              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12883                 rc = -EPROTONOSUPPORT;
12884                 goto err_free;
12885         }
12886         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12887             bp->hwrm_spec_code < 0x10601) {
12888                 rc = -EPROTONOSUPPORT;
12889                 goto err_free;
12890         }
12891         flags = fkeys->control.flags;
12892         if (((flags & FLOW_DIS_ENCAPSULATION) &&
12893              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12894                 rc = -EPROTONOSUPPORT;
12895                 goto err_free;
12896         }
12897
12898         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12899         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12900
12901         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12902         head = &bp->ntp_fltr_hash_tbl[idx];
12903         rcu_read_lock();
12904         hlist_for_each_entry_rcu(fltr, head, hash) {
12905                 if (bnxt_fltr_match(fltr, new_fltr)) {
12906                         rc = fltr->sw_id;
12907                         rcu_read_unlock();
12908                         goto err_free;
12909                 }
12910         }
12911         rcu_read_unlock();
12912
12913         spin_lock_bh(&bp->ntp_fltr_lock);
12914         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12915                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
12916         if (bit_id < 0) {
12917                 spin_unlock_bh(&bp->ntp_fltr_lock);
12918                 rc = -ENOMEM;
12919                 goto err_free;
12920         }
12921
12922         new_fltr->sw_id = (u16)bit_id;
12923         new_fltr->flow_id = flow_id;
12924         new_fltr->l2_fltr_idx = l2_idx;
12925         new_fltr->rxq = rxq_index;
12926         hlist_add_head_rcu(&new_fltr->hash, head);
12927         bp->ntp_fltr_count++;
12928         spin_unlock_bh(&bp->ntp_fltr_lock);
12929
12930         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12931         bnxt_queue_sp_work(bp);
12932
12933         return new_fltr->sw_id;
12934
12935 err_free:
12936         kfree(new_fltr);
12937         return rc;
12938 }
12939
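/* Walk the ntuple filter hash table from the sp_task: program any filter
 * not yet marked BNXT_FLTR_VALID into the hardware, and free filters that
 * rps_may_expire_flow() says are no longer in use.
 */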
12940 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12941 {
12942         int i;
12943
12944         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12945                 struct hlist_head *head;
12946                 struct hlist_node *tmp;
12947                 struct bnxt_ntuple_filter *fltr;
12948                 int rc;
12949
12950                 head = &bp->ntp_fltr_hash_tbl[i];
12951                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12952                         bool del = false;
12953
12954                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12955                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12956                                                         fltr->flow_id,
12957                                                         fltr->sw_id)) {
12958                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
12959                                                                          fltr);
12960                                         del = true;
12961                                 }
12962                         } else {
12963                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12964                                                                        fltr);
12965                                 if (rc)
12966                                         del = true;
12967                                 else
12968                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
12969                         }
12970
12971                         if (del) {
12972                                 spin_lock_bh(&bp->ntp_fltr_lock);
12973                                 hlist_del_rcu(&fltr->hash);
12974                                 bp->ntp_fltr_count--;
12975                                 spin_unlock_bh(&bp->ntp_fltr_lock);
12976                                 synchronize_rcu();
12977                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12978                                 kfree(fltr);
12979                         }
12980                 }
12981         }
12982         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12983                 netdev_info(bp->dev, "Received PF driver unload event!\n");
12984 }
12985
12986 #else
12987
12988 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12989 {
12990 }
12991
12992 #endif /* CONFIG_RFS_ACCEL */
12993
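/* udp_tunnel_nic sync_table callback.  Each table holds a single port
 * (VXLAN or GENEVE); a zero port in the table entry means the offloaded
 * port should be freed.
 */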
12994 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12995 {
12996         struct bnxt *bp = netdev_priv(netdev);
12997         struct udp_tunnel_info ti;
12998         unsigned int cmd;
12999
13000         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
13001         if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
13002                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
13003         else
13004                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
13005
13006         if (ti.port)
13007                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
13008
13009         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
13010 }
13011
13012 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
13013         .sync_table     = bnxt_udp_tunnel_sync,
13014         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
13015                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
13016         .tables         = {
13017                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
13018                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
13019         },
13020 };
13021
13022 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13023                                struct net_device *dev, u32 filter_mask,
13024                                int nlflags)
13025 {
13026         struct bnxt *bp = netdev_priv(dev);
13027
13028         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
13029                                        nlflags, filter_mask, NULL);
13030 }
13031
13032 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
13033                                u16 flags, struct netlink_ext_ack *extack)
13034 {
13035         struct bnxt *bp = netdev_priv(dev);
13036         struct nlattr *attr, *br_spec;
13037         int rem, rc = 0;
13038
13039         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
13040                 return -EOPNOTSUPP;
13041
13042         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13043         if (!br_spec)
13044                 return -EINVAL;
13045
13046         nla_for_each_nested(attr, br_spec, rem) {
13047                 u16 mode;
13048
13049                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13050                         continue;
13051
13052                 if (nla_len(attr) < sizeof(mode))
13053                         return -EINVAL;
13054
13055                 mode = nla_get_u16(attr);
13056                 if (mode == bp->br_mode)
13057                         break;
13058
13059                 rc = bnxt_hwrm_set_br_mode(bp, mode);
13060                 if (!rc)
13061                         bp->br_mode = mode;
13062                 break;
13063         }
13064         return rc;
13065 }
13066
13067 int bnxt_get_port_parent_id(struct net_device *dev,
13068                             struct netdev_phys_item_id *ppid)
13069 {
13070         struct bnxt *bp = netdev_priv(dev);
13071
13072         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
13073                 return -EOPNOTSUPP;
13074
13075         /* The PF and its VF-reps only support the switchdev framework */
13076         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
13077                 return -EOPNOTSUPP;
13078
13079         ppid->id_len = sizeof(bp->dsn);
13080         memcpy(ppid->id, bp->dsn, ppid->id_len);
13081
13082         return 0;
13083 }
13084
13085 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
13086 {
13087         struct bnxt *bp = netdev_priv(dev);
13088
13089         return &bp->dl_port;
13090 }
13091
13092 static const struct net_device_ops bnxt_netdev_ops = {
13093         .ndo_open               = bnxt_open,
13094         .ndo_start_xmit         = bnxt_start_xmit,
13095         .ndo_stop               = bnxt_close,
13096         .ndo_get_stats64        = bnxt_get_stats64,
13097         .ndo_set_rx_mode        = bnxt_set_rx_mode,
13098         .ndo_eth_ioctl          = bnxt_ioctl,
13099         .ndo_validate_addr      = eth_validate_addr,
13100         .ndo_set_mac_address    = bnxt_change_mac_addr,
13101         .ndo_change_mtu         = bnxt_change_mtu,
13102         .ndo_fix_features       = bnxt_fix_features,
13103         .ndo_set_features       = bnxt_set_features,
13104         .ndo_features_check     = bnxt_features_check,
13105         .ndo_tx_timeout         = bnxt_tx_timeout,
13106 #ifdef CONFIG_BNXT_SRIOV
13107         .ndo_get_vf_config      = bnxt_get_vf_config,
13108         .ndo_set_vf_mac         = bnxt_set_vf_mac,
13109         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
13110         .ndo_set_vf_rate        = bnxt_set_vf_bw,
13111         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
13112         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
13113         .ndo_set_vf_trust       = bnxt_set_vf_trust,
13114 #endif
13115         .ndo_setup_tc           = bnxt_setup_tc,
13116 #ifdef CONFIG_RFS_ACCEL
13117         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
13118 #endif
13119         .ndo_bpf                = bnxt_xdp,
13120         .ndo_xdp_xmit           = bnxt_xdp_xmit,
13121         .ndo_bridge_getlink     = bnxt_bridge_getlink,
13122         .ndo_bridge_setlink     = bnxt_bridge_setlink,
13123         .ndo_get_devlink_port   = bnxt_get_devlink_port,
13124 };
13125
13126 static void bnxt_remove_one(struct pci_dev *pdev)
13127 {
13128         struct net_device *dev = pci_get_drvdata(pdev);
13129         struct bnxt *bp = netdev_priv(dev);
13130
13131         if (BNXT_PF(bp))
13132                 bnxt_sriov_disable(bp);
13133
13134         if (BNXT_PF(bp))
13135                 devlink_port_type_clear(&bp->dl_port);
13136
13137         bnxt_ptp_clear(bp);
13138         pci_disable_pcie_error_reporting(pdev);
13139         unregister_netdev(dev);
13140         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13141         /* Flush any pending tasks */
13142         cancel_work_sync(&bp->sp_task);
13143         cancel_delayed_work_sync(&bp->fw_reset_task);
13144         bp->sp_event = 0;
13145
13146         bnxt_dl_fw_reporters_destroy(bp);
13147         bnxt_dl_unregister(bp);
13148         bnxt_shutdown_tc(bp);
13149
13150         bnxt_clear_int_mode(bp);
13151         bnxt_hwrm_func_drv_unrgtr(bp);
13152         bnxt_free_hwrm_resources(bp);
13153         bnxt_ethtool_free(bp);
13154         bnxt_dcb_free(bp);
13155         kfree(bp->edev);
13156         bp->edev = NULL;
13157         kfree(bp->ptp_cfg);
13158         bp->ptp_cfg = NULL;
13159         kfree(bp->fw_health);
13160         bp->fw_health = NULL;
13161         bnxt_cleanup_pci(bp);
13162         bnxt_free_ctx_mem(bp);
13163         kfree(bp->ctx);
13164         bp->ctx = NULL;
13165         kfree(bp->rss_indir_tbl);
13166         bp->rss_indir_tbl = NULL;
13167         bnxt_free_port_stats(bp);
13168         free_netdev(dev);
13169 }
13170
13171 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
13172 {
13173         int rc = 0;
13174         struct bnxt_link_info *link_info = &bp->link_info;
13175
13176         bp->phy_flags = 0;
13177         rc = bnxt_hwrm_phy_qcaps(bp);
13178         if (rc) {
13179                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13180                            rc);
13181                 return rc;
13182         }
13183         if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13184                 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13185         else
13186                 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
13187         if (!fw_dflt)
13188                 return 0;
13189
13190         mutex_lock(&bp->link_lock);
13191         rc = bnxt_update_link(bp, false);
13192         if (rc) {
13193                 mutex_unlock(&bp->link_lock);
13194                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13195                            rc);
13196                 return rc;
13197         }
13198
13199         /* Older firmware does not have supported_auto_speeds, so assume
13200          * that all supported speeds can be autonegotiated.
13201          */
13202         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
13203                 link_info->support_auto_speeds = link_info->support_speeds;
13204
13205         bnxt_init_ethtool_link_settings(bp);
13206         mutex_unlock(&bp->link_lock);
13207         return 0;
13208 }
13209
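/* The MSI-X Table Size field in the MSI-X Message Control register encodes
 * the table size as N - 1, hence the + 1 below.
 */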
13210 static int bnxt_get_max_irq(struct pci_dev *pdev)
13211 {
13212         u16 ctrl;
13213
13214         if (!pdev->msix_cap)
13215                 return 1;
13216
13217         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
13218         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
13219 }
13220
13221 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13222                                 int *max_cp)
13223 {
13224         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13225         int max_ring_grps = 0, max_irq;
13226
13227         *max_tx = hw_resc->max_tx_rings;
13228         *max_rx = hw_resc->max_rx_rings;
13229         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13230         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13231                         bnxt_get_ulp_msix_num(bp),
13232                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
13233         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13234                 *max_cp = min_t(int, *max_cp, max_irq);
13235         max_ring_grps = hw_resc->max_hw_ring_grps;
13236         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13237                 *max_cp -= 1;
13238                 *max_rx -= 2;
13239         }
13240         if (bp->flags & BNXT_FLAG_AGG_RINGS)
13241                 *max_rx >>= 1;
13242         if (bp->flags & BNXT_FLAG_CHIP_P5) {
13243                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13244                 /* On P5 chips, the max_cp output param must be the number of available NQs */
13245                 *max_cp = max_irq;
13246         }
13247         *max_rx = min_t(int, *max_rx, max_ring_grps);
13248 }
13249
13250 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13251 {
13252         int rx, tx, cp;
13253
13254         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
13255         *max_rx = rx;
13256         *max_tx = tx;
13257         if (!rx || !tx || !cp)
13258                 return -ENOMEM;
13259
13260         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
13261 }
13262
13263 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13264                                bool shared)
13265 {
13266         int rc;
13267
13268         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13269         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13270                 /* Not enough rings, try disabling agg rings. */
13271                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13272                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13273                 if (rc) {
13274                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
13275                         bp->flags |= BNXT_FLAG_AGG_RINGS;
13276                         return rc;
13277                 }
13278                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
13279                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13280                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13281                 bnxt_set_ring_params(bp);
13282         }
13283
13284         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13285                 int max_cp, max_stat, max_irq;
13286
13287                 /* Reserve minimum resources for RoCE */
13288                 max_cp = bnxt_get_max_func_cp_rings(bp);
13289                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
13290                 max_irq = bnxt_get_max_func_irqs(bp);
13291                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
13292                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13293                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13294                         return 0;
13295
13296                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13297                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13298                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13299                 max_cp = min_t(int, max_cp, max_irq);
13300                 max_cp = min_t(int, max_cp, max_stat);
13301                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13302                 if (rc)
13303                         rc = 0;
13304         }
13305         return rc;
13306 }
13307
13308 /* In the initial default shared ring setting, each shared ring must have
13309  * an RX/TX ring pair.
13310  */
13311 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13312 {
13313         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13314         bp->rx_nr_rings = bp->cp_nr_rings;
13315         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13316         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13317 }
13318
13319 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
13320 {
13321         int dflt_rings, max_rx_rings, max_tx_rings, rc;
13322
13323         if (!bnxt_can_reserve_rings(bp))
13324                 return 0;
13325
13326         if (sh)
13327                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
13328         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
13329         /* Reduce default rings on multi-port cards so that total default
13330          * rings do not exceed CPU count.
13331          */
13332         if (bp->port_count > 1) {
13333                 int max_rings =
13334                         max_t(int, num_online_cpus() / bp->port_count, 1);
13335
13336                 dflt_rings = min_t(int, dflt_rings, max_rings);
13337         }
13338         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13339         if (rc)
13340                 return rc;
13341         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13342         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13343         if (sh)
13344                 bnxt_trim_dflt_sh_rings(bp);
13345         else
13346                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13347         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13348
13349         rc = __bnxt_reserve_rings(bp);
13350         if (rc && rc != -ENODEV)
13351                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13352         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13353         if (sh)
13354                 bnxt_trim_dflt_sh_rings(bp);
13355
13356         /* Rings may have been trimmed, re-reserve the trimmed rings. */
13357         if (bnxt_need_reserve_rings(bp)) {
13358                 rc = __bnxt_reserve_rings(bp);
13359                 if (rc && rc != -ENODEV)
13360                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13361                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13362         }
13363         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13364                 bp->rx_nr_rings++;
13365                 bp->cp_nr_rings++;
13366         }
13367         if (rc) {
13368                 bp->tx_nr_rings = 0;
13369                 bp->rx_nr_rings = 0;
13370         }
13371         return rc;
13372 }
13373
13374 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13375 {
13376         int rc;
13377
13378         if (bp->tx_nr_rings)
13379                 return 0;
13380
13381         bnxt_ulp_irq_stop(bp);
13382         bnxt_clear_int_mode(bp);
13383         rc = bnxt_set_dflt_rings(bp, true);
13384         if (rc) {
13385                 if (BNXT_VF(bp) && rc == -ENODEV)
13386                         netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13387                 else
13388                         netdev_err(bp->dev, "Not enough rings available.\n");
13389                 goto init_dflt_ring_err;
13390         }
13391         rc = bnxt_init_int_mode(bp);
13392         if (rc)
13393                 goto init_dflt_ring_err;
13394
13395         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13396
13397         bnxt_set_dflt_rfs(bp);
13398
13399 init_dflt_ring_err:
13400         bnxt_ulp_irq_restart(bp, rc);
13401         return rc;
13402 }
13403
13404 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13405 {
13406         int rc;
13407
13408         ASSERT_RTNL();
13409         bnxt_hwrm_func_qcaps(bp);
13410
13411         if (netif_running(bp->dev))
13412                 __bnxt_close_nic(bp, true, false);
13413
13414         bnxt_ulp_irq_stop(bp);
13415         bnxt_clear_int_mode(bp);
13416         rc = bnxt_init_int_mode(bp);
13417         bnxt_ulp_irq_restart(bp, rc);
13418
13419         if (netif_running(bp->dev)) {
13420                 if (rc)
13421                         dev_close(bp->dev);
13422                 else
13423                         rc = bnxt_open_nic(bp, true, false);
13424         }
13425
13426         return rc;
13427 }
13428
13429 static int bnxt_init_mac_addr(struct bnxt *bp)
13430 {
13431         int rc = 0;
13432
13433         if (BNXT_PF(bp)) {
13434                 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
13435         } else {
13436 #ifdef CONFIG_BNXT_SRIOV
13437                 struct bnxt_vf_info *vf = &bp->vf;
13438                 bool strict_approval = true;
13439
13440                 if (is_valid_ether_addr(vf->mac_addr)) {
13441                         /* overwrite netdev dev_addr with admin VF MAC */
13442                         eth_hw_addr_set(bp->dev, vf->mac_addr);
13443                         /* Older PF driver or firmware may not approve this
13444                          * correctly.
13445                          */
13446                         strict_approval = false;
13447                 } else {
13448                         eth_hw_addr_random(bp->dev);
13449                 }
13450                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13451 #endif
13452         }
13453         return rc;
13454 }
13455
13456 static void bnxt_vpd_read_info(struct bnxt *bp)
13457 {
13458         struct pci_dev *pdev = bp->pdev;
13459         unsigned int vpd_size, kw_len;
13460         int pos, size;
13461         u8 *vpd_data;
13462
13463         vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13464         if (IS_ERR(vpd_data)) {
13465                 pci_warn(pdev, "Unable to read VPD\n");
13466                 return;
13467         }
13468
13469         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13470                                            PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13471         if (pos < 0)
13472                 goto read_sn;
13473
13474         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13475         memcpy(bp->board_partno, &vpd_data[pos], size);
13476
13477 read_sn:
13478         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13479                                            PCI_VPD_RO_KEYWORD_SERIALNO,
13480                                            &kw_len);
13481         if (pos < 0)
13482                 goto exit;
13483
13484         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13485         memcpy(bp->board_serialno, &vpd_data[pos], size);
13486 exit:
13487         kfree(vpd_data);
13488 }
13489
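/* Read the PCIe Device Serial Number capability and store it little-endian
 * in @dsn; the PF later uses it as the eswitch switch_id.
 */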
13490 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13491 {
13492         struct pci_dev *pdev = bp->pdev;
13493         u64 qword;
13494
13495         qword = pci_get_dsn(pdev);
13496         if (!qword) {
13497                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13498                 return -EOPNOTSUPP;
13499         }
13500
13501         put_unaligned_le64(qword, dsn);
13502
13503         bp->flags |= BNXT_FLAG_DSN_VALID;
13504         return 0;
13505 }
13506
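/* Map the doorbell BAR after firmware has reported the doorbell region
 * size (bp->db_size); see the note in bnxt_init_board() about the
 * deferred mapping.
 */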
13507 static int bnxt_map_db_bar(struct bnxt *bp)
13508 {
13509         if (!bp->db_size)
13510                 return -ENODEV;
13511         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13512         if (!bp->bar1)
13513                 return -ENOMEM;
13514         return 0;
13515 }
13516
13517 void bnxt_print_device_info(struct bnxt *bp)
13518 {
13519         netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13520                     board_info[bp->board_idx].name,
13521                     (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13522
13523         pcie_print_link_status(bp->pdev);
13524 }
13525
13526 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13527 {
13528         struct net_device *dev;
13529         struct bnxt *bp;
13530         int rc, max_irqs;
13531
13532         if (pci_is_bridge(pdev))
13533                 return -ENODEV;
13534
13535         /* Clear any DMA transactions left pending by the crash kernel
13536          * while loading the driver in the capture (kdump) kernel.
13537          */
13538         if (is_kdump_kernel()) {
13539                 pci_clear_master(pdev);
13540                 pcie_flr(pdev);
13541         }
13542
13543         max_irqs = bnxt_get_max_irq(pdev);
13544         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13545         if (!dev)
13546                 return -ENOMEM;
13547
13548         bp = netdev_priv(dev);
13549         bp->board_idx = ent->driver_data;
13550         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13551         bnxt_set_max_func_irqs(bp, max_irqs);
13552
13553         if (bnxt_vf_pciid(bp->board_idx))
13554                 bp->flags |= BNXT_FLAG_VF;
13555
13556         if (pdev->msix_cap)
13557                 bp->flags |= BNXT_FLAG_MSIX_CAP;
13558
13559         rc = bnxt_init_board(pdev, dev);
13560         if (rc < 0)
13561                 goto init_err_free;
13562
13563         dev->netdev_ops = &bnxt_netdev_ops;
13564         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13565         dev->ethtool_ops = &bnxt_ethtool_ops;
13566         pci_set_drvdata(pdev, dev);
13567
13568         rc = bnxt_alloc_hwrm_resources(bp);
13569         if (rc)
13570                 goto init_err_pci_clean;
13571
13572         mutex_init(&bp->hwrm_cmd_lock);
13573         mutex_init(&bp->link_lock);
13574
13575         rc = bnxt_fw_init_one_p1(bp);
13576         if (rc)
13577                 goto init_err_pci_clean;
13578
13579         if (BNXT_PF(bp))
13580                 bnxt_vpd_read_info(bp);
13581
13582         if (BNXT_CHIP_P5(bp)) {
13583                 bp->flags |= BNXT_FLAG_CHIP_P5;
13584                 if (BNXT_CHIP_SR2(bp))
13585                         bp->flags |= BNXT_FLAG_CHIP_SR2;
13586         }
13587
13588         rc = bnxt_alloc_rss_indir_tbl(bp);
13589         if (rc)
13590                 goto init_err_pci_clean;
13591
13592         rc = bnxt_fw_init_one_p2(bp);
13593         if (rc)
13594                 goto init_err_pci_clean;
13595
13596         rc = bnxt_map_db_bar(bp);
13597         if (rc) {
13598                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13599                         rc);
13600                 goto init_err_pci_clean;
13601         }
13602
13603         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13604                            NETIF_F_TSO | NETIF_F_TSO6 |
13605                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13606                            NETIF_F_GSO_IPXIP4 |
13607                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13608                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13609                            NETIF_F_RXCSUM | NETIF_F_GRO;
13610
13611         if (BNXT_SUPPORTS_TPA(bp))
13612                 dev->hw_features |= NETIF_F_LRO;
13613
13614         dev->hw_enc_features =
13615                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13616                         NETIF_F_TSO | NETIF_F_TSO6 |
13617                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13618                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13619                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13620         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13621
13622         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13623                                     NETIF_F_GSO_GRE_CSUM;
13624         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13625         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13626                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13627         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13628                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13629         if (BNXT_SUPPORTS_TPA(bp))
13630                 dev->hw_features |= NETIF_F_GRO_HW;
13631         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13632         if (dev->features & NETIF_F_GRO_HW)
13633                 dev->features &= ~NETIF_F_LRO;
13634         dev->priv_flags |= IFF_UNICAST_FLT;
13635
13636 #ifdef CONFIG_BNXT_SRIOV
13637         init_waitqueue_head(&bp->sriov_cfg_wait);
13638 #endif
13639         if (BNXT_SUPPORTS_TPA(bp)) {
13640                 bp->gro_func = bnxt_gro_func_5730x;
13641                 if (BNXT_CHIP_P4(bp))
13642                         bp->gro_func = bnxt_gro_func_5731x;
13643                 else if (BNXT_CHIP_P5(bp))
13644                         bp->gro_func = bnxt_gro_func_5750x;
13645         }
13646         if (!BNXT_CHIP_P4_PLUS(bp))
13647                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13648
13649         rc = bnxt_init_mac_addr(bp);
13650         if (rc) {
13651                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13652                 rc = -EADDRNOTAVAIL;
13653                 goto init_err_pci_clean;
13654         }
13655
13656         if (BNXT_PF(bp)) {
13657                 /* Read the adapter's DSN to use as the eswitch switch_id */
13658                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13659         }
13660
13661         /* MTU range: 60 - FW defined max */
13662         dev->min_mtu = ETH_ZLEN;
13663         dev->max_mtu = bp->max_mtu;
13664
13665         rc = bnxt_probe_phy(bp, true);
13666         if (rc)
13667                 goto init_err_pci_clean;
13668
13669         bnxt_set_rx_skb_mode(bp, false);
13670         bnxt_set_tpa_flags(bp);
13671         bnxt_set_ring_params(bp);
13672         rc = bnxt_set_dflt_rings(bp, true);
13673         if (rc) {
13674                 if (BNXT_VF(bp) && rc == -ENODEV) {
13675                         netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13676                 } else {
13677                         netdev_err(bp->dev, "Not enough rings available.\n");
13678                         rc = -ENOMEM;
13679                 }
13680                 goto init_err_pci_clean;
13681         }
13682
13683         bnxt_fw_init_one_p3(bp);
13684
13685         bnxt_init_dflt_coal(bp);
13686
13687         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13688                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13689
13690         rc = bnxt_init_int_mode(bp);
13691         if (rc)
13692                 goto init_err_pci_clean;
13693
13694         /* No TC has been set yet and rings may have been trimmed due to
13695          * limited MSIX, so we re-initialize the TX rings per TC.
13696          */
13697         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13698
13699         if (BNXT_PF(bp)) {
13700                 if (!bnxt_pf_wq) {
13701                         bnxt_pf_wq =
13702                                 create_singlethread_workqueue("bnxt_pf_wq");
13703                         if (!bnxt_pf_wq) {
13704                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13705                                 rc = -ENOMEM;
13706                                 goto init_err_pci_clean;
13707                         }
13708                 }
13709                 rc = bnxt_init_tc(bp);
13710                 if (rc)
13711                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13712                                    rc);
13713         }
13714
13715         bnxt_inv_fw_health_reg(bp);
13716         rc = bnxt_dl_register(bp);
13717         if (rc)
13718                 goto init_err_dl;
13719
13720         rc = register_netdev(dev);
13721         if (rc)
13722                 goto init_err_cleanup;
13723
13724         if (BNXT_PF(bp))
13725                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13726         bnxt_dl_fw_reporters_create(bp);
13727
13728         bnxt_print_device_info(bp);
13729
13730         pci_save_state(pdev);
13731         return 0;
13732
13733 init_err_cleanup:
13734         bnxt_dl_unregister(bp);
13735 init_err_dl:
13736         bnxt_shutdown_tc(bp);
13737         bnxt_clear_int_mode(bp);
13738
13739 init_err_pci_clean:
13740         bnxt_hwrm_func_drv_unrgtr(bp);
13741         bnxt_free_hwrm_resources(bp);
13742         bnxt_ethtool_free(bp);
13743         bnxt_ptp_clear(bp);
13744         kfree(bp->ptp_cfg);
13745         bp->ptp_cfg = NULL;
13746         kfree(bp->fw_health);
13747         bp->fw_health = NULL;
13748         bnxt_cleanup_pci(bp);
13749         bnxt_free_ctx_mem(bp);
13750         kfree(bp->ctx);
13751         bp->ctx = NULL;
13752         kfree(bp->rss_indir_tbl);
13753         bp->rss_indir_tbl = NULL;
13754
13755 init_err_free:
13756         free_netdev(dev);
13757         return rc;
13758 }
13759
13760 static void bnxt_shutdown(struct pci_dev *pdev)
13761 {
13762         struct net_device *dev = pci_get_drvdata(pdev);
13763         struct bnxt *bp;
13764
13765         if (!dev)
13766                 return;
13767
13768         rtnl_lock();
13769         bp = netdev_priv(dev);
13770         if (!bp)
13771                 goto shutdown_exit;
13772
13773         if (netif_running(dev))
13774                 dev_close(dev);
13775
13776         bnxt_ulp_shutdown(bp);
13777         bnxt_clear_int_mode(bp);
13778         pci_disable_device(pdev);
13779
13780         if (system_state == SYSTEM_POWER_OFF) {
13781                 pci_wake_from_d3(pdev, bp->wol);
13782                 pci_set_power_state(pdev, PCI_D3hot);
13783         }
13784
13785 shutdown_exit:
13786         rtnl_unlock();
13787 }
13788
13789 #ifdef CONFIG_PM_SLEEP
13790 static int bnxt_suspend(struct device *device)
13791 {
13792         struct net_device *dev = dev_get_drvdata(device);
13793         struct bnxt *bp = netdev_priv(dev);
13794         int rc = 0;
13795
13796         rtnl_lock();
13797         bnxt_ulp_stop(bp);
13798         if (netif_running(dev)) {
13799                 netif_device_detach(dev);
13800                 rc = bnxt_close(dev);
13801         }
13802         bnxt_hwrm_func_drv_unrgtr(bp);
13803         pci_disable_device(bp->pdev);
13804         bnxt_free_ctx_mem(bp);
13805         kfree(bp->ctx);
13806         bp->ctx = NULL;
13807         rtnl_unlock();
13808         return rc;
13809 }
13810
13811 static int bnxt_resume(struct device *device)
13812 {
13813         struct net_device *dev = dev_get_drvdata(device);
13814         struct bnxt *bp = netdev_priv(dev);
13815         int rc = 0;
13816
13817         rtnl_lock();
13818         rc = pci_enable_device(bp->pdev);
13819         if (rc) {
13820                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13821                            rc);
13822                 goto resume_exit;
13823         }
13824         pci_set_master(bp->pdev);
13825         if (bnxt_hwrm_ver_get(bp)) {
13826                 rc = -ENODEV;
13827                 goto resume_exit;
13828         }
13829         rc = bnxt_hwrm_func_reset(bp);
13830         if (rc) {
13831                 rc = -EBUSY;
13832                 goto resume_exit;
13833         }
13834
13835         rc = bnxt_hwrm_func_qcaps(bp);
13836         if (rc)
13837                 goto resume_exit;
13838
13839         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13840                 rc = -ENODEV;
13841                 goto resume_exit;
13842         }
13843
13844         bnxt_get_wol_settings(bp);
13845         if (netif_running(dev)) {
13846                 rc = bnxt_open(dev);
13847                 if (!rc)
13848                         netif_device_attach(dev);
13849         }
13850
13851 resume_exit:
13852         bnxt_ulp_start(bp, rc);
13853         if (!rc)
13854                 bnxt_reenable_sriov(bp);
13855         rtnl_unlock();
13856         return rc;
13857 }
13858
13859 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13860 #define BNXT_PM_OPS (&bnxt_pm_ops)
13861
13862 #else
13863
13864 #define BNXT_PM_OPS NULL
13865
13866 #endif /* CONFIG_PM_SLEEP */
13867
13868 /**
13869  * bnxt_io_error_detected - called when PCI error is detected
13870  * @pdev: Pointer to PCI device
13871  * @state: The current pci connection state
13872  *
13873  * This function is called after a PCI bus error affecting
13874  * this device has been detected.
13875  */
13876 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13877                                                pci_channel_state_t state)
13878 {
13879         struct net_device *netdev = pci_get_drvdata(pdev);
13880         struct bnxt *bp = netdev_priv(netdev);
13881
13882         netdev_info(netdev, "PCI I/O error detected\n");
13883
13884         rtnl_lock();
13885         netif_device_detach(netdev);
13886
13887         bnxt_ulp_stop(bp);
13888
13889         if (state == pci_channel_io_perm_failure) {
13890                 rtnl_unlock();
13891                 return PCI_ERS_RESULT_DISCONNECT;
13892         }
13893
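        /* A frozen channel indicates a fatal (uncorrectable) error;
         * record it so bnxt_io_slot_reset() knows the BARs must be
         * rewritten.
         */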
13894         if (state == pci_channel_io_frozen)
13895                 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13896
13897         if (netif_running(netdev))
13898                 bnxt_close(netdev);
13899
13900         if (pci_is_enabled(pdev))
13901                 pci_disable_device(pdev);
13902         bnxt_free_ctx_mem(bp);
13903         kfree(bp->ctx);
13904         bp->ctx = NULL;
13905         rtnl_unlock();
13906
13907         /* Request a slot reset. */
13908         return PCI_ERS_RESULT_NEED_RESET;
13909 }
13910
13911 /**
13912  * bnxt_io_slot_reset - called after the pci bus has been reset.
13913  * @pdev: Pointer to PCI device
13914  *
13915  * Restart the card from scratch, as if from a cold boot.
13916  * At this point, the card has experienced a hard reset,
13917  * followed by fixups by the BIOS, and has its config space
13918  * set up identically to what it was at cold boot.
13919  */
13920 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13921 {
13922         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13923         struct net_device *netdev = pci_get_drvdata(pdev);
13924         struct bnxt *bp = netdev_priv(netdev);
13925         int retry = 0;
13926         int err = 0;
13927         int off;
13928
13929         netdev_info(bp->dev, "PCI Slot Reset\n");
13930
13931         rtnl_lock();
13932
13933         if (pci_enable_device(pdev)) {
13934                 dev_err(&pdev->dev,
13935                         "Cannot re-enable PCI device after reset.\n");
13936         } else {
13937                 pci_set_master(pdev);
13938                 /* Upon a fatal error, the device's internal logic that
13939                  * latches the BAR values is reset, and is restored only
13940                  * when the BARs are rewritten.
13941                  *
13942                  * Since pci_restore_state() does not rewrite a BAR whose
13943                  * value matches the saved value, the driver must write
13944                  * the BARs to 0 to force the restore after a fatal error.
13945                  */
13946                 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13947                                        &bp->state)) {
13948                         for (off = PCI_BASE_ADDRESS_0;
13949                              off <= PCI_BASE_ADDRESS_5; off += 4)
13950                                 pci_write_config_dword(bp->pdev, off, 0);
13951                 }
13952                 pci_restore_state(pdev);
13953                 pci_save_state(pdev);
13954
13955                 bnxt_inv_fw_health_reg(bp);
13956                 bnxt_try_map_fw_health_reg(bp);
13957
13958                 /* In some PCIe AER scenarios, firmware may take up to
13959                  * 10 seconds to become ready, so retry until it is.
13960                  */
13961                 do {
13962                         err = bnxt_try_recover_fw(bp);
13963                         if (!err)
13964                                 break;
13965                         retry++;
13966                 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
13967
13968                 if (err) {
13969                         dev_err(&pdev->dev, "Firmware not ready\n");
13970                         goto reset_exit;
13971                 }
13972
13973                 err = bnxt_hwrm_func_reset(bp);
13974                 if (!err)
13975                         result = PCI_ERS_RESULT_RECOVERED;
13976
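                /* Rebuild the interrupt mode with ULP IRQs quiesced so
                 * that vectors are reallocated cleanly after the reset.
                 */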
13977                 bnxt_ulp_irq_stop(bp);
13978                 bnxt_clear_int_mode(bp);
13979                 err = bnxt_init_int_mode(bp);
13980                 bnxt_ulp_irq_restart(bp, err);
13981         }
13982
13983 reset_exit:
13984         bnxt_clear_reservations(bp, true);
13985         rtnl_unlock();
13986
13987         return result;
13988 }
13989
13990 /**
13991  * bnxt_io_resume - called when traffic can start flowing again.
13992  * @pdev: Pointer to PCI device
13993  *
13994  * This callback is called when the error recovery driver tells
13995  * us that it's OK to resume normal operation.
13996  */
13997 static void bnxt_io_resume(struct pci_dev *pdev)
13998 {
13999         struct net_device *netdev = pci_get_drvdata(pdev);
14000         struct bnxt *bp = netdev_priv(netdev);
14001         int err;
14002
14003         netdev_info(bp->dev, "PCI Slot Resume\n");
14004         rtnl_lock();
14005
14006         err = bnxt_hwrm_func_qcaps(bp);
14007         if (!err && netif_running(netdev))
14008                 err = bnxt_open(netdev);
14009
14010         bnxt_ulp_start(bp, err);
14011         if (!err) {
14012                 bnxt_reenable_sriov(bp);
14013                 netif_device_attach(netdev);
14014         }
14015
14016         rtnl_unlock();
14017 }
14018
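/* PCI error recovery callbacks, invoked by the AER core in sequence:
 * error_detected(), then slot_reset() once the slot has been reset
 * (requested via PCI_ERS_RESULT_NEED_RESET), then resume() when normal
 * traffic may restart.
 */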
14019 static const struct pci_error_handlers bnxt_err_handler = {
14020         .error_detected = bnxt_io_error_detected,
14021         .slot_reset     = bnxt_io_slot_reset,
14022         .resume         = bnxt_io_resume
14023 };
14024
14025 static struct pci_driver bnxt_pci_driver = {
14026         .name           = DRV_MODULE_NAME,
14027         .id_table       = bnxt_pci_tbl,
14028         .probe          = bnxt_init_one,
14029         .remove         = bnxt_remove_one,
14030         .shutdown       = bnxt_shutdown,
14031         .driver.pm      = BNXT_PM_OPS,
14032         .err_handler    = &bnxt_err_handler,
14033 #if defined(CONFIG_BNXT_SRIOV)
14034         .sriov_configure = bnxt_sriov_configure,
14035 #endif
14036 };
14037
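/* Module entry/exit: register the PCI driver (plus debugfs support);
 * exit unregisters it, destroys the PF workqueue if one was created,
 * and tears down debugfs.
 */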
14038 static int __init bnxt_init(void)
14039 {
14040         bnxt_debug_init();
14041         return pci_register_driver(&bnxt_pci_driver);
14042 }
14043
14044 static void __exit bnxt_exit(void)
14045 {
14046         pci_unregister_driver(&bnxt_pci_driver);
14047         if (bnxt_pf_wq)
14048                 destroy_workqueue(bnxt_pf_wq);
14049         bnxt_debug_exit();
14050 }
14051
14052 module_init(bnxt_init);
14053 module_exit(bnxt_exit);