drivers/net/ethernet/aquantia/atlantic/aq_nic.c
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/pkt_cls.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
        static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
                0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
                0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
                0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
                0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
                0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
        };
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
        struct aq_rss_parameters *rss_params;
        int i = 0;

        rss_params = &cfg->aq_rss;

        rss_params->hash_secret_key_size = sizeof(rss_key);
        memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
        rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
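        /* Fill the indirection table round-robin across the RSS queues. The
         * bitwise AND works as a cheap modulo when num_rss_queues is a power
         * of two, which is how the caller computes it (the vector count is
         * rounded down to a power of two first).
         */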
        for (i = rss_params->indirection_table_size; i--;)
                rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Recalculate the number of vectors */
static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
{
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

        cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
        cfg->vecs = min(cfg->vecs, num_online_cpus());
        if (self->irqvecs > AQ_HW_SERVICE_IRQS)
                cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
        /* cfg->vecs should be power of 2 for RSS */
        cfg->vecs = rounddown_pow_of_two(cfg->vecs);

        if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
                if (cfg->tcs > 2)
                        cfg->vecs = min(cfg->vecs, 4U);
        }

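        /* The hardware splits its rings between TCs in one of two fixed
         * layouts: 8 TCs with a few rings each, or 4 TCs with twice as many
         * rings each, so the TC mode follows from the vector count.
         */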
        if (cfg->vecs <= 4)
                cfg->tc_mode = AQ_TC_MODE_8TCS;
        else
                cfg->tc_mode = AQ_TC_MODE_4TCS;

        /* RSS rings */
        cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
        aq_nic_rss_init(self, cfg->num_rss_queues);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
        int i;

        cfg->tcs = AQ_CFG_TCS_DEF;

        cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

        cfg->itr = aq_itr;
        cfg->tx_itr = aq_itr_tx;
        cfg->rx_itr = aq_itr_rx;

        cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
        cfg->is_rss = AQ_CFG_IS_RSS_DEF;
        cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
        cfg->fc.req = AQ_CFG_FC_MODE;
        cfg->wol = AQ_CFG_WOL_MODES;

        cfg->mtu = AQ_CFG_MTU_DEF;
        cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
        cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

        cfg->is_lro = AQ_CFG_IS_LRO_DEF;
        cfg->is_ptp = true;

        /* descriptors */
        cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
        cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

        aq_nic_cfg_update_num_vecs(self);

        cfg->irq_type = aq_pci_func_get_irq_type(self);

        if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
            (cfg->aq_hw_caps->vecs == 1U) ||
            (cfg->vecs == 1U)) {
                cfg->is_rss = 0U;
                cfg->vecs = 1U;
        }

        /* Check if we have enough vectors allocated for
         * link status IRQ. If no - we'll know link state from
         * slower service task.
         */
        if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
                cfg->link_irq_vec = cfg->vecs;
        else
                cfg->link_irq_vec = 0;

        cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
        cfg->features = cfg->aq_hw_caps->hw_features;
        cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
        cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
        cfg->is_vlan_force_promisc = true;

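        /* Map the eight 802.1p priorities evenly onto the configured TCs;
         * e.g. with tcs == 4 this yields the map 0,0,1,1,2,2,3,3.
         */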
        for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
                cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}

static int aq_nic_update_link_status(struct aq_nic_s *self)
{
        int err = self->aq_fw_ops->update_link_status(self->aq_hw);
        u32 fc = 0;

        if (err)
                return err;

        if (self->aq_fw_ops->get_flow_control)
                self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
        self->aq_nic_cfg.fc.cur = fc;

        if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
                netdev_info(self->ndev, "%s: link change old %d new %d\n",
                            AQ_CFG_DRV_NAME, self->link_status.mbps,
                            self->aq_hw->aq_link_status.mbps);
                aq_nic_update_interrupt_moderation_settings(self);

                if (self->aq_ptp) {
                        aq_ptp_clock_init(self);
                        aq_ptp_tm_offset_set(self,
                                             self->aq_hw->aq_link_status.mbps);
                        aq_ptp_link_change(self);
                }

                /* Driver has to update flow control settings on RX block
                 * on any link event.
                 * We should query FW whether it negotiated FC.
                 */
                if (self->aq_hw_ops->hw_set_fc)
                        self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
        }

        self->link_status = self->aq_hw->aq_link_status;
        if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
                aq_utils_obj_set(&self->flags,
                                 AQ_NIC_FLAG_STARTED);
                aq_utils_obj_clear(&self->flags,
                                   AQ_NIC_LINK_DOWN);
                netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
                aq_macsec_enable(self);
#endif
                if (self->aq_hw_ops->hw_tc_rate_limit_set)
                        self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);

                netif_tx_wake_all_queues(self->ndev);
        }
        if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
                netif_carrier_off(self->ndev);
                netif_tx_disable(self->ndev);
                aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
        }

        return 0;
}

static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
        struct aq_nic_s *self = private;

        if (!self)
                return IRQ_NONE;

        aq_nic_update_link_status(self);

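        /* Re-enable the link state interrupt vector now that it has been
         * serviced (it is requested with IRQF_ONESHOT in aq_nic_start()).
         */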
        self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                       BIT(self->aq_nic_cfg.link_irq_vec));

        return IRQ_HANDLED;
}

static void aq_nic_service_task(struct work_struct *work)
{
        struct aq_nic_s *self = container_of(work, struct aq_nic_s,
                                             service_task);
        int err;

        aq_ptp_service_task(self);

        if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
                return;

        err = aq_nic_update_link_status(self);
        if (err)
                return;

#if IS_ENABLED(CONFIG_MACSEC)
        aq_macsec_work(self);
#endif

        mutex_lock(&self->fwreq_mutex);
        if (self->aq_fw_ops->update_stats)
                self->aq_fw_ops->update_stats(self->aq_hw);
        mutex_unlock(&self->fwreq_mutex);

        aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
        struct aq_nic_s *self = from_timer(self, t, service_timer);

        mod_timer(&self->service_timer,
                  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

        aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
        struct aq_nic_s *self = from_timer(self, t, polling_timer);
        unsigned int i = 0U;

        for (i = 0U; self->aq_vecs > i; ++i)
                aq_vec_isr(i, (void *)self->aq_vec[i]);

        mod_timer(&self->polling_timer, jiffies +
                  AQ_CFG_POLLING_TIMER_INTERVAL);
}

static int aq_nic_hw_prepare(struct aq_nic_s *self)
{
        int err = 0;

        err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
        if (err)
                goto exit;

        err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);

exit:
        return err;
}

static bool aq_nic_is_valid_ether_addr(const u8 *addr)
{
        /* Some engineering samples of Aquantia NICs are provisioned with a
         * partially populated MAC, which is still invalid.
         */
        return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
}

int aq_nic_ndev_register(struct aq_nic_s *self)
{
        u8 addr[ETH_ALEN];
        int err = 0;

        if (!self->ndev) {
                err = -EINVAL;
                goto err_exit;
        }

        err = aq_nic_hw_prepare(self);
        if (err)
                goto err_exit;

#if IS_ENABLED(CONFIG_MACSEC)
        aq_macsec_init(self);
#endif

        if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
                /* If DT has none or an invalid one, ask device for MAC address */
                mutex_lock(&self->fwreq_mutex);
                err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
                mutex_unlock(&self->fwreq_mutex);

                if (err)
                        goto err_exit;

                if (is_valid_ether_addr(addr) &&
                    aq_nic_is_valid_ether_addr(addr)) {
                        eth_hw_addr_set(self->ndev, addr);
                } else {
                        netdev_warn(self->ndev, "MAC is invalid, will use random.");
                        eth_hw_addr_random(self->ndev);
                }
        }

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
        {
                static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

                eth_hw_addr_set(self->ndev, mac_addr_permanent);
        }
#endif

        for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
             self->aq_vecs++) {
                self->aq_vec[self->aq_vecs] =
                    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
                if (!self->aq_vec[self->aq_vecs]) {
                        err = -ENOMEM;
                        goto err_exit;
                }
        }

        netif_carrier_off(self->ndev);

        netif_tx_disable(self->ndev);

        err = register_netdev(self->ndev);
        if (err)
                goto err_exit;

err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
        if (err)
                aq_macsec_free(self);
#endif
        return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
        const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
        struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

        self->ndev->hw_features |= aq_hw_caps->hw_features;
        self->ndev->features = aq_hw_caps->hw_features;
        self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                                     NETIF_F_RXHASH | NETIF_F_SG |
                                     NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6;
        self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
        self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
        self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

        self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
        self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
        self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
                        struct aq_ring_s *ring)
{
        self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
        return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
        struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;
        int err = 0;

        self->power_state = AQ_HW_POWER_STATE_D0;
        mutex_lock(&self->fwreq_mutex);
        err = self->aq_hw_ops->hw_reset(self->aq_hw);
        mutex_unlock(&self->fwreq_mutex);
        if (err < 0)
                goto err_exit;
        /* Restore default settings */
        aq_nic_set_downshift(self, self->aq_nic_cfg.downshift_counter);
        aq_nic_set_media_detect(self, self->aq_nic_cfg.is_media_detect ?
                                AQ_HW_MEDIA_DETECT_CNT : 0);

        err = self->aq_hw_ops->hw_init(self->aq_hw,
                                       aq_nic_get_ndev(self)->dev_addr);
        if (err < 0)
                goto err_exit;

        if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
            self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
                self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
                err = aq_phy_init(self->aq_hw);

                /* Disable the PTP on NICs where it's known to cause datapath
                 * problems.
                 * Ideally this should have been done by PHY provisioning, but
                 * many units have been shipped with enabled PTP block already.
                 */
                if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
                        if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
                                aq_phy_disable_ptp(self->aq_hw);
        }

        for (i = 0U; i < self->aq_vecs; i++) {
                aq_vec = self->aq_vec[i];
                err = aq_vec_ring_alloc(aq_vec, self, i,
                                        aq_nic_get_cfg(self));
                if (err)
                        goto err_exit;

                aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
        }

        if (aq_nic_get_cfg(self)->is_ptp) {
                err = aq_ptp_init(self, self->irqvecs - 1);
                if (err < 0)
                        goto err_exit;

                err = aq_ptp_ring_alloc(self);
                if (err < 0)
                        goto err_exit;

                err = aq_ptp_ring_init(self);
                if (err < 0)
                        goto err_exit;
        }

        netif_carrier_off(self->ndev);

err_exit:
        return err;
}

int aq_nic_start(struct aq_nic_s *self)
{
        struct aq_vec_s *aq_vec = NULL;
        struct aq_nic_cfg_s *cfg;
        unsigned int i = 0U;
        int err = 0;

        cfg = aq_nic_get_cfg(self);

        err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
                                                     self->mc_list.ar,
                                                     self->mc_list.count);
        if (err < 0)
                goto err_exit;

        err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
                                                    self->packet_filter);
        if (err < 0)
                goto err_exit;

        for (i = 0U; self->aq_vecs > i; ++i) {
                aq_vec = self->aq_vec[i];
                err = aq_vec_start(aq_vec);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_ptp_ring_start(self);
        if (err < 0)
                goto err_exit;

        aq_nic_set_loopback(self);

        err = self->aq_hw_ops->hw_start(self->aq_hw);
        if (err < 0)
                goto err_exit;

        err = aq_nic_update_interrupt_moderation_settings(self);
        if (err)
                goto err_exit;

        INIT_WORK(&self->service_task, aq_nic_service_task);

        timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
        aq_nic_service_timer_cb(&self->service_timer);

        if (cfg->is_polling) {
                timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
                mod_timer(&self->polling_timer, jiffies +
                          AQ_CFG_POLLING_TIMER_INTERVAL);
        } else {
                for (i = 0U; self->aq_vecs > i; ++i) {
                        aq_vec = self->aq_vec[i];
                        err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
                                                    aq_vec_isr, aq_vec,
                                                    aq_vec_get_affinity_mask(aq_vec));
                        if (err < 0)
                                goto err_exit;
                }

                err = aq_ptp_irq_alloc(self);
                if (err < 0)
                        goto err_exit;

                if (cfg->link_irq_vec) {
                        int irqvec = pci_irq_vector(self->pdev,
                                                    cfg->link_irq_vec);
                        err = request_threaded_irq(irqvec, NULL,
                                                   aq_linkstate_threaded_isr,
                                                   IRQF_SHARED | IRQF_ONESHOT,
                                                   self->ndev->name, self);
                        if (err < 0)
                                goto err_exit;
                        self->msix_entry_mask |= (1 << cfg->link_irq_vec);
                }

                err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                                     AQ_CFG_IRQ_MASK);
                if (err < 0)
                        goto err_exit;
        }

        err = netif_set_real_num_tx_queues(self->ndev,
                                           self->aq_vecs * cfg->tcs);
        if (err < 0)
                goto err_exit;

        err = netif_set_real_num_rx_queues(self->ndev,
                                           self->aq_vecs * cfg->tcs);
        if (err < 0)
                goto err_exit;

        for (i = 0; i < cfg->tcs; i++) {
                u16 offset = self->aq_vecs * i;

                netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
        }
        netif_tx_start_all_queues(self->ndev);

err_exit:
        return err;
}

static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
                                   struct xdp_frame *xdpf,
                                   struct aq_ring_s *ring)
{
        struct device *dev = aq_nic_get_dev(self);
        struct aq_ring_buff_s *first = NULL;
        unsigned int dx = ring->sw_tail;
        struct aq_ring_buff_s *dx_buff;
        struct skb_shared_info *sinfo;
        unsigned int frag_count = 0U;
        unsigned int nr_frags = 0U;
        unsigned int ret = 0U;
        u16 total_len;

        dx_buff = &ring->buff_ring[dx];
        dx_buff->flags = 0U;

        sinfo = xdp_get_shared_info_from_frame(xdpf);
        total_len = xdpf->len;
        dx_buff->len = total_len;
        if (xdp_frame_has_frags(xdpf)) {
                nr_frags = sinfo->nr_frags;
                total_len += sinfo->xdp_frags_size;
        }
        dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
                                     DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
                goto exit;

        first = dx_buff;
        dx_buff->len_pkt = total_len;
        dx_buff->is_sop = 1U;
        dx_buff->is_mapped = 1U;
        ++ret;

        for (; nr_frags--; ++frag_count) {
                skb_frag_t *frag = &sinfo->frags[frag_count];
                unsigned int frag_len = skb_frag_size(frag);
                unsigned int buff_offset = 0U;
                unsigned int buff_size = 0U;
                dma_addr_t frag_pa;

                while (frag_len) {
                        if (frag_len > AQ_CFG_TX_FRAME_MAX)
                                buff_size = AQ_CFG_TX_FRAME_MAX;
                        else
                                buff_size = frag_len;

                        frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
                                                   buff_size, DMA_TO_DEVICE);

                        if (unlikely(dma_mapping_error(dev, frag_pa)))
                                goto mapping_error;

                        dx = aq_ring_next_dx(ring, dx);
                        dx_buff = &ring->buff_ring[dx];

                        dx_buff->flags = 0U;
                        dx_buff->len = buff_size;
                        dx_buff->pa = frag_pa;
                        dx_buff->is_mapped = 1U;
                        dx_buff->eop_index = 0xffffU;

                        frag_len -= buff_size;
                        buff_offset += buff_size;

                        ++ret;
                }
        }

        first->eop_index = dx;
        dx_buff->is_eop = 1U;
        dx_buff->skb = NULL;
        dx_buff->xdpf = xdpf;
        goto exit;

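        /* Unwind: walk forward from the first descriptor this frame occupied
         * and release every DMA mapping taken so far.
         */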
mapping_error:
        for (dx = ring->sw_tail;
             ret > 0;
             --ret, dx = aq_ring_next_dx(ring, dx)) {
                dx_buff = &ring->buff_ring[dx];

                if (!dx_buff->pa)
                        continue;
                if (unlikely(dx_buff->is_sop))
                        dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
                                       DMA_TO_DEVICE);
        }

exit:
        return ret;
}

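/* Fill ring->buff_ring[] descriptors for an skb. As a worked example, a GSO
 * skb with one page fragment (small enough to fit in one AQ_CFG_TX_FRAME_MAX
 * chunk) consumes three descriptors, relative to the initial sw_tail:
 *   [0] context descriptor (mss/header lengths, no DMA mapping)
 *   [1] SOP descriptor for the linear part
 *   [2] EOP descriptor for the fragment
 * Returns the number of descriptors used, or 0 if a DMA mapping failed.
 */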
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
                            struct aq_ring_s *ring)
{
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
        struct device *dev = aq_nic_get_dev(self);
        struct aq_ring_buff_s *first = NULL;
        u8 ipver = ip_hdr(skb)->version;
        struct aq_ring_buff_s *dx_buff;
        bool need_context_tag = false;
        unsigned int frag_count = 0U;
        unsigned int ret = 0U;
        unsigned int dx;
        u8 l4proto = 0;

        if (ipver == 4)
                l4proto = ip_hdr(skb)->protocol;
        else if (ipver == 6)
                l4proto = ipv6_hdr(skb)->nexthdr;

        dx = ring->sw_tail;
        dx_buff = &ring->buff_ring[dx];
        dx_buff->flags = 0U;

        if (unlikely(skb_is_gso(skb))) {
                dx_buff->mss = skb_shinfo(skb)->gso_size;
                if (l4proto == IPPROTO_TCP) {
                        dx_buff->is_gso_tcp = 1U;
                        dx_buff->len_l4 = tcp_hdrlen(skb);
                } else if (l4proto == IPPROTO_UDP) {
                        dx_buff->is_gso_udp = 1U;
                        dx_buff->len_l4 = sizeof(struct udphdr);
                        /* UDP GSO Hardware does not replace packet length. */
                        udp_hdr(skb)->len = htons(dx_buff->mss +
                                                  dx_buff->len_l4);
                } else {
                        WARN_ONCE(true, "Bad GSO mode");
                        goto exit;
                }
                dx_buff->len_pkt = skb->len;
                dx_buff->len_l2 = ETH_HLEN;
                dx_buff->len_l3 = skb_network_header_len(skb);
                dx_buff->eop_index = 0xffffU;
                dx_buff->is_ipv6 = (ipver == 6);
                need_context_tag = true;
        }

        if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
                dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
                dx_buff->len_pkt = skb->len;
                dx_buff->is_vlan = 1U;
                need_context_tag = true;
        }

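        /* GSO and VLAN insertion offloads consume an extra context
         * descriptor ahead of the data descriptors; advance past it here.
         */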
        if (need_context_tag) {
                dx = aq_ring_next_dx(ring, dx);
                dx_buff = &ring->buff_ring[dx];
                dx_buff->flags = 0U;
                ++ret;
        }

        dx_buff->len = skb_headlen(skb);
        dx_buff->pa = dma_map_single(dev,
                                     skb->data,
                                     dx_buff->len,
                                     DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
                ret = 0;
                goto exit;
        }

        first = dx_buff;
        dx_buff->len_pkt = skb->len;
        dx_buff->is_sop = 1U;
        dx_buff->is_mapped = 1U;
        ++ret;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
                dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
                dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
        }

        for (; nr_frags--; ++frag_count) {
                unsigned int frag_len = 0U;
                unsigned int buff_offset = 0U;
                unsigned int buff_size = 0U;
                dma_addr_t frag_pa;
                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

                frag_len = skb_frag_size(frag);

                while (frag_len) {
                        if (frag_len > AQ_CFG_TX_FRAME_MAX)
                                buff_size = AQ_CFG_TX_FRAME_MAX;
                        else
                                buff_size = frag_len;

                        frag_pa = skb_frag_dma_map(dev,
                                                   frag,
                                                   buff_offset,
                                                   buff_size,
                                                   DMA_TO_DEVICE);

                        if (unlikely(dma_mapping_error(dev,
                                                       frag_pa)))
                                goto mapping_error;

                        dx = aq_ring_next_dx(ring, dx);
                        dx_buff = &ring->buff_ring[dx];

                        dx_buff->flags = 0U;
                        dx_buff->len = buff_size;
                        dx_buff->pa = frag_pa;
                        dx_buff->is_mapped = 1U;
                        dx_buff->eop_index = 0xffffU;

                        frag_len -= buff_size;
                        buff_offset += buff_size;

                        ++ret;
                }
        }

        first->eop_index = dx;
        dx_buff->is_eop = 1U;
        dx_buff->skb = skb;
        dx_buff->xdpf = NULL;
        goto exit;

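        /* Unwind: walk forward from the first descriptor this skb occupied,
         * unmapping data descriptors and skipping context descriptors,
         * which carry no DMA mapping.
         */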
mapping_error:
        for (dx = ring->sw_tail;
             ret > 0;
             --ret, dx = aq_ring_next_dx(ring, dx)) {
                dx_buff = &ring->buff_ring[dx];

                if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
                    !dx_buff->is_vlan && dx_buff->pa) {
                        if (unlikely(dx_buff->is_sop)) {
                                dma_unmap_single(dev,
                                                 dx_buff->pa,
                                                 dx_buff->len,
                                                 DMA_TO_DEVICE);
                        } else {
                                dma_unmap_page(dev,
                                               dx_buff->pa,
                                               dx_buff->len,
                                               DMA_TO_DEVICE);
                        }
                }
        }

exit:
        return ret;
}

int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
                     struct xdp_frame *xdpf)
{
        u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
        struct net_device *ndev = aq_nic_get_ndev(aq_nic);
        struct skb_shared_info *sinfo;
        int cpu = smp_processor_id();
        int err = NETDEV_TX_BUSY;
        struct netdev_queue *nq;
        unsigned int frags = 1;

        if (xdp_frame_has_frags(xdpf)) {
                sinfo = xdp_get_shared_info_from_frame(xdpf);
                frags += sinfo->nr_frags;
        }

        if (frags > AQ_CFG_SKB_FRAGS_MAX)
                return err;

        nq = netdev_get_tx_queue(ndev, tx_ring->idx);
        __netif_tx_lock(nq, cpu);

        aq_ring_update_queue_state(tx_ring);

        /* Above status update may stop the queue. Check this. */
        if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
                goto out;

        frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
        if (likely(frags))
                err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
                                                         frags);
out:
        __netif_tx_unlock(nq);

        return err;
}

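/* Transmit an skb. skb->queue_mapping encodes both the traffic class and
 * the vector: the cfg->vecs queues of TC n occupy queue indices
 * [n * cfg->vecs, (n + 1) * cfg->vecs), matching the netdev_set_tc_queue()
 * layout programmed in aq_nic_start().
 */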
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
        struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
        unsigned int vec = skb->queue_mapping % cfg->vecs;
        unsigned int tc = skb->queue_mapping / cfg->vecs;
        struct aq_ring_s *ring = NULL;
        unsigned int frags = 0U;
        int err = NETDEV_TX_OK;

        frags = skb_shinfo(skb)->nr_frags + 1;

        ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];

        if (frags > AQ_CFG_SKB_FRAGS_MAX) {
                dev_kfree_skb_any(skb);
                goto err_exit;
        }

        aq_ring_update_queue_state(ring);

        if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
                err = NETDEV_TX_BUSY;
                goto err_exit;
        }

        /* Above status update may stop the queue. Check this. */
        if (__netif_subqueue_stopped(self->ndev,
                                     AQ_NIC_RING2QMAP(self, ring->idx))) {
                err = NETDEV_TX_BUSY;
                goto err_exit;
        }

        frags = aq_nic_map_skb(self, skb, ring);

        if (likely(frags)) {
                err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
                                                       ring, frags);
        } else {
                err = NETDEV_TX_BUSY;
        }

err_exit:
        return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
        return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
        int err = 0;

        err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
        if (err < 0)
                goto err_exit;

        self->packet_filter = flags;

err_exit:
        return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
        const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
        unsigned int packet_filter = ndev->flags;
        struct netdev_hw_addr *ha = NULL;
        unsigned int i = 0U;
        int err = 0;

        self->mc_list.count = 0;
        if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
                packet_filter |= IFF_PROMISC;
        } else {
                netdev_for_each_uc_addr(ha, ndev) {
                        ether_addr_copy(self->mc_list.ar[i++], ha->addr);
                }
        }

        cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
        if (cfg->is_mc_list_enabled) {
                if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
                        packet_filter |= IFF_ALLMULTI;
                } else {
                        netdev_for_each_mc_addr(ha, ndev) {
                                ether_addr_copy(self->mc_list.ar[i++],
                                                ha->addr);
                        }
                }
        }

        if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
                self->mc_list.count = i;
                err = hw_ops->hw_multicast_list_set(self->aq_hw,
                                                    self->mc_list.ar,
                                                    self->mc_list.count);
                if (err < 0)
                        return err;
        }

        return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
        self->aq_nic_cfg.mtu = new_mtu;

        return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
        return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
        return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
        u32 *regs_buff = p;
        int err = 0;

        if (unlikely(!self->aq_hw_ops->hw_get_regs))
                return -EOPNOTSUPP;

        regs->version = 1;

        err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
                                           self->aq_nic_cfg.aq_hw_caps,
                                           regs_buff);
        if (err < 0)
                goto err_exit;

err_exit:
        return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
        if (unlikely(!self->aq_hw_ops->hw_get_regs))
                return 0;

        return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
        struct aq_stats_s *stats;
        unsigned int count = 0U;
        unsigned int i = 0U;
        unsigned int tc;

        if (self->aq_fw_ops->update_stats) {
                mutex_lock(&self->fwreq_mutex);
                self->aq_fw_ops->update_stats(self->aq_hw);
                mutex_unlock(&self->fwreq_mutex);
        }
        stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

        if (!stats)
                goto err_exit;

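        /* Fixed firmware/MAC counters are laid out first; the per-TC,
         * per-vector software counters are appended after them below.
         */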
        data[i] = stats->uprc + stats->mprc + stats->bprc;
        data[++i] = stats->uprc;
        data[++i] = stats->mprc;
        data[++i] = stats->bprc;
        data[++i] = stats->erpt;
        data[++i] = stats->uptc + stats->mptc + stats->bptc;
        data[++i] = stats->uptc;
        data[++i] = stats->mptc;
        data[++i] = stats->bptc;
        data[++i] = stats->ubrc;
        data[++i] = stats->ubtc;
        data[++i] = stats->mbrc;
        data[++i] = stats->mbtc;
        data[++i] = stats->bbrc;
        data[++i] = stats->bbtc;
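        /* Prefer the aggregate byte counters when the hardware reports them;
         * otherwise fall back to summing the per-type counters.
         */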
1045         if (stats->brc)
1046                 data[++i] = stats->brc;
1047         else
1048                 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
1049         if (stats->btc)
1050                 data[++i] = stats->btc;
1051         else
1052                 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
1053         data[++i] = stats->dma_pkt_rc;
1054         data[++i] = stats->dma_pkt_tc;
1055         data[++i] = stats->dma_oct_rc;
1056         data[++i] = stats->dma_oct_tc;
1057         data[++i] = stats->dpc;
1058
1059         i++;
1060
1061         data += i;
1062
1063         for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
1064                 for (i = 0U; self->aq_vecs > i; ++i) {
1065                         if (!self->aq_vec[i])
1066                                 break;
1067                         data += count;
1068                         count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
1069                 }
1070         }
1071
1072         data += count;
1073
1074 err_exit:
1075         return data;
1076 }
1077
1078 static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
1079 {
1080         struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
1081         struct net_device *ndev = self->ndev;
1082
1083         ndev->stats.rx_packets = stats->dma_pkt_rc;
1084         ndev->stats.rx_bytes = stats->dma_oct_rc;
1085         ndev->stats.rx_errors = stats->erpr;
1086         ndev->stats.rx_dropped = stats->dpc;
1087         ndev->stats.tx_packets = stats->dma_pkt_tc;
1088         ndev->stats.tx_bytes = stats->dma_oct_tc;
1089         ndev->stats.tx_errors = stats->erpt;
1090         ndev->stats.multicast = stats->mprc;
1091 }
1092
1093 void aq_nic_get_link_ksettings(struct aq_nic_s *self,
1094                                struct ethtool_link_ksettings *cmd)
1095 {
1096         u32 lp_link_speed_msk;
1097
1098         if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
1099                 cmd->base.port = PORT_FIBRE;
1100         else
1101                 cmd->base.port = PORT_TP;
1102
1103         cmd->base.duplex = DUPLEX_UNKNOWN;
1104         if (self->link_status.mbps)
1105                 cmd->base.duplex = self->link_status.full_duplex ?
1106                                    DUPLEX_FULL : DUPLEX_HALF;
1107         cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
1108
1109         ethtool_link_ksettings_zero_link_mode(cmd, supported);
1110
1111         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
1112                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1113                                                      10000baseT_Full);
1114
1115         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
1116                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1117                                                      5000baseT_Full);
1118
1119         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
1120                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1121                                                      2500baseT_Full);
1122
1123         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
1124                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1125                                                      1000baseT_Full);
1126
1127         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF)
1128                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1129                                                      1000baseT_Half);
1130
1131         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
1132                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1133                                                      100baseT_Full);
1134
1135         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF)
1136                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1137                                                      100baseT_Half);
1138
1139         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
1140                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1141                                                      10baseT_Full);
1142
1143         if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF)
1144                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1145                                                      10baseT_Half);
1146
1147         if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
1148                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1149                                                      Pause);
1150                 ethtool_link_ksettings_add_link_mode(cmd, supported,
1151                                                      Asym_Pause);
1152         }
1153
1154         ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
1155
1156         if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
1157                 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
1158         else
1159                 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
1160
1161         ethtool_link_ksettings_zero_link_mode(cmd, advertising);
1162
1163         if (self->aq_nic_cfg.is_autoneg)
1164                 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
1165
1166         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
1167                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1168                                                      10000baseT_Full);
1169
1170         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
1171                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1172                                                      5000baseT_Full);
1173
1174         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
1175                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1176                                                      2500baseT_Full);
1177
1178         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
1179                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1180                                                      1000baseT_Full);
1181
1182         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF)
1183                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1184                                                      1000baseT_Half);
1185
1186         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
1187                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1188                                                      100baseT_Full);
1189
1190         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF)
1191                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1192                                                      100baseT_Half);
1193
1194         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
1195                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1196                                                      10baseT_Full);
1197
1198         if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF)
1199                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1200                                                      10baseT_Half);
1201
1202         if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
1203                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1204                                                      Pause);
1205
1206         /* Asym is when either RX or TX, but not both */
1207         if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
1208             !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
1209                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
1210                                                      Asym_Pause);
1211
1212         if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
1213                 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
1214         else
1215                 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
1216
1217         ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
1218         lp_link_speed_msk = self->aq_hw->aq_link_status.lp_link_speed_msk;
1219
1220         if (lp_link_speed_msk & AQ_NIC_RATE_10G)
1221                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1222                                                      10000baseT_Full);
1223
1224         if (lp_link_speed_msk & AQ_NIC_RATE_5G)
1225                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1226                                                      5000baseT_Full);
1227
1228         if (lp_link_speed_msk & AQ_NIC_RATE_2G5)
1229                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1230                                                      2500baseT_Full);
1231
1232         if (lp_link_speed_msk & AQ_NIC_RATE_1G)
1233                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1234                                                      1000baseT_Full);
1235
1236         if (lp_link_speed_msk & AQ_NIC_RATE_1G_HALF)
1237                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1238                                                      1000baseT_Half);
1239
1240         if (lp_link_speed_msk & AQ_NIC_RATE_100M)
1241                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1242                                                      100baseT_Full);
1243
1244         if (lp_link_speed_msk & AQ_NIC_RATE_100M_HALF)
1245                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1246                                                      100baseT_Half);
1247
1248         if (lp_link_speed_msk & AQ_NIC_RATE_10M)
1249                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1250                                                      10baseT_Full);
1251
1252         if (lp_link_speed_msk & AQ_NIC_RATE_10M_HALF)
1253                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1254                                                      10baseT_Half);
1255
1256         if (self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX)
1257                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1258                                                      Pause);
1259         if (!!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_TX) ^
1260             !!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX))
1261                 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
1262                                                      Asym_Pause);
1263 }
1264
1265 int aq_nic_set_link_ksettings(struct aq_nic_s *self,
1266                               const struct ethtool_link_ksettings *cmd)
1267 {
1268         int fduplex = (cmd->base.duplex == DUPLEX_FULL);
1269         u32 speed = cmd->base.speed;
1270         u32 rate = 0U;
1271         int err = 0;
1272
1273         if (!fduplex && speed > SPEED_1000) {
1274                 err = -EINVAL;
1275                 goto err_exit;
1276         }
1277
1278         if (cmd->base.autoneg == AUTONEG_ENABLE) {
1279                 rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
1280                 self->aq_nic_cfg.is_autoneg = true;
1281         } else {
1282                 switch (speed) {
1283                 case SPEED_10:
1284                         rate = fduplex ? AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF;
1285                         break;
1286
1287                 case SPEED_100:
1288                         rate = fduplex ? AQ_NIC_RATE_100M
1289                                        : AQ_NIC_RATE_100M_HALF;
1290                         break;
1291
1292                 case SPEED_1000:
1293                         rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF;
1294                         break;
1295
1296                 case SPEED_2500:
1297                         rate = AQ_NIC_RATE_2G5;
1298                         break;
1299
1300                 case SPEED_5000:
1301                         rate = AQ_NIC_RATE_5G;
1302                         break;
1303
1304                 case SPEED_10000:
1305                         rate = AQ_NIC_RATE_10G;
1306                         break;
1307
1308                 default:
1309                         err = -1;
1310                         goto err_exit;
1311                 }
1312                 if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
1313                         err = -1;
1314                         goto err_exit;
1315                 }
1316
1317                 self->aq_nic_cfg.is_autoneg = false;
1318         }
1319
1320         mutex_lock(&self->fwreq_mutex);
1321         err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
1322         mutex_unlock(&self->fwreq_mutex);
1323         if (err < 0)
1324                 goto err_exit;
1325
1326         self->aq_nic_cfg.link_speed_msk = rate;
1327
1328 err_exit:
1329         return err;
1330 }
1331
1332 struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
1333 {
1334         return &self->aq_nic_cfg;
1335 }
1336
1337 u32 aq_nic_get_fw_version(struct aq_nic_s *self)
1338 {
1339         return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
1340 }
1341
1342 int aq_nic_set_loopback(struct aq_nic_s *self)
1343 {
1344         struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
1345
1346         if (!self->aq_hw_ops->hw_set_loopback ||
1347             !self->aq_fw_ops->set_phyloopback)
1348                 return -EOPNOTSUPP;
1349
1350         mutex_lock(&self->fwreq_mutex);
1351         self->aq_hw_ops->hw_set_loopback(self->aq_hw,
1352                                          AQ_HW_LOOPBACK_DMA_SYS,
1353                                          !!(cfg->priv_flags &
1354                                             BIT(AQ_HW_LOOPBACK_DMA_SYS)));
1355
1356         self->aq_hw_ops->hw_set_loopback(self->aq_hw,
1357                                          AQ_HW_LOOPBACK_PKT_SYS,
1358                                          !!(cfg->priv_flags &
1359                                             BIT(AQ_HW_LOOPBACK_PKT_SYS)));
1360
1361         self->aq_hw_ops->hw_set_loopback(self->aq_hw,
1362                                          AQ_HW_LOOPBACK_DMA_NET,
1363                                          !!(cfg->priv_flags &
1364                                             BIT(AQ_HW_LOOPBACK_DMA_NET)));
1365
1366         self->aq_fw_ops->set_phyloopback(self->aq_hw,
1367                                          AQ_HW_LOOPBACK_PHYINT_SYS,
1368                                          !!(cfg->priv_flags &
1369                                             BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));
1370
1371         self->aq_fw_ops->set_phyloopback(self->aq_hw,
1372                                          AQ_HW_LOOPBACK_PHYEXT_SYS,
1373                                          !!(cfg->priv_flags &
1374                                             BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
1375         mutex_unlock(&self->fwreq_mutex);
1376
1377         return 0;
1378 }
1379
1380 int aq_nic_stop(struct aq_nic_s *self)
1381 {
1382         unsigned int i = 0U;
1383
1384         netif_tx_disable(self->ndev);
1385         netif_carrier_off(self->ndev);
1386
1387         del_timer_sync(&self->service_timer);
1388         cancel_work_sync(&self->service_task);
1389
1390         self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
1391
1392         if (self->aq_nic_cfg.is_polling)
1393                 del_timer_sync(&self->polling_timer);
1394         else
1395                 aq_pci_func_free_irqs(self);
1396
1397         aq_ptp_irq_free(self);
1398
1399         for (i = 0U; self->aq_vecs > i; ++i)
1400                 aq_vec_stop(self->aq_vec[i]);
1401
1402         aq_ptp_ring_stop(self);
1403
1404         return self->aq_hw_ops->hw_stop(self->aq_hw);
1405 }
1406
1407 void aq_nic_set_power(struct aq_nic_s *self)
1408 {
1409         if (self->power_state != AQ_HW_POWER_STATE_D0 ||
1410             self->aq_hw->aq_nic_cfg->wol)
1411                 if (likely(self->aq_fw_ops->set_power)) {
1412                         mutex_lock(&self->fwreq_mutex);
1413                         self->aq_fw_ops->set_power(self->aq_hw,
1414                                                    self->power_state,
1415                                                    self->ndev->dev_addr);
1416                         mutex_unlock(&self->fwreq_mutex);
1417                 }
1418 }
1419
1420 void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
1421 {
1422         struct aq_vec_s *aq_vec = NULL;
1423         unsigned int i = 0U;
1424
1425         if (!self)
1426                 goto err_exit;
1427
1428         for (i = 0U; i < self->aq_vecs; i++) {
1429                 aq_vec = self->aq_vec[i];
1430                 aq_vec_deinit(aq_vec);
1431                 aq_vec_ring_free(aq_vec);
1432         }
1433
1434         aq_ptp_unregister(self);
1435         aq_ptp_ring_deinit(self);
1436         aq_ptp_ring_free(self);
1437         aq_ptp_free(self);
1438
1439         if (likely(self->aq_fw_ops->deinit) && link_down) {
1440                 mutex_lock(&self->fwreq_mutex);
1441                 self->aq_fw_ops->deinit(self->aq_hw);
1442                 mutex_unlock(&self->fwreq_mutex);
1443         }
1444
1445 err_exit:;
1446 }

void aq_nic_free_vectors(struct aq_nic_s *self)
{
        unsigned int i = 0U;

        if (!self)
                goto err_exit;

        for (i = ARRAY_SIZE(self->aq_vec); i--;) {
                if (self->aq_vec[i]) {
                        aq_vec_free(self->aq_vec[i]);
                        self->aq_vec[i] = NULL;
                }
        }

err_exit:;
}

int aq_nic_realloc_vectors(struct aq_nic_s *self)
{
        struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);

        aq_nic_free_vectors(self);

        for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
                self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
                                                           cfg);
                if (unlikely(!self->aq_vec[self->aq_vecs]))
                        return -ENOMEM;
        }

        return 0;
}
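
/* Illustrative only: the reallocation pattern used after a configuration
 * change that can alter cfg->vecs (aq_nic_setup_tc_mqprio() below does
 * exactly this).  On -ENOMEM the old vectors are already freed, so the
 * caller must not bring the interface back up.
 *
 *	const unsigned int prev_vecs = cfg->vecs;
 *
 *	aq_nic_cfg_update_num_vecs(nic);
 *	if (prev_vecs != cfg->vecs)
 *		err = aq_nic_realloc_vectors(nic);
 */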

/* Prepare the NIC for system shutdown: detach from the stack, stop
 * traffic, and either keep firmware alive for WoL or power it down.
 */
void aq_nic_shutdown(struct aq_nic_s *self)
{
        int err = 0;

        if (!self->ndev)
                return;

        rtnl_lock();

        netif_device_detach(self->ndev);

        if (netif_running(self->ndev)) {
                err = aq_nic_stop(self);
                if (err < 0)
                        goto err_exit;
        }
        aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
        aq_nic_set_power(self);

err_exit:
        rtnl_unlock();
}

/* Reserve a hardware RX filter slot of the given type.  Returns the
 * reserved location, or 0xFF when the type has no reservable slots.
 */
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
        u8 location = 0xFF;
        u32 fltr_cnt;
        u32 n_bit;

        switch (type) {
        case aq_rx_filter_ethertype:
                location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
                           self->aq_hw_rx_fltrs.fet_reserved_count;
                self->aq_hw_rx_fltrs.fet_reserved_count++;
                break;
        case aq_rx_filter_l3l4:
                fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
                n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;

                self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
                self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
                location = n_bit;
                break;
        default:
                break;
        }

        return location;
}

void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
                           u32 location)
{
        switch (type) {
        case aq_rx_filter_ethertype:
                self->aq_hw_rx_fltrs.fet_reserved_count--;
                break;
        case aq_rx_filter_l3l4:
                self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
                self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
                break;
        default:
                break;
        }
}
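
/* Illustrative only: the reserve/use/release pairing expected by the RX
 * filter code (the real callers live in aq_filters.c).  A return of 0xFF
 * means no slot was reserved.
 *
 *	u8 loc = aq_nic_reserve_filter(nic, aq_rx_filter_l3l4);
 *
 *	if (loc != 0xFFU) {
 *		// ... program the hardware filter at 'loc' ...
 *		aq_nic_release_filter(nic, aq_rx_filter_l3l4, loc);
 *	}
 */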

int aq_nic_set_downshift(struct aq_nic_s *self, int val)
{
        int err = 0;
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

        if (!self->aq_fw_ops->set_downshift)
                return -EOPNOTSUPP;

        if (val > 15) {
                netdev_err(self->ndev, "downshift counter should be <= 15\n");
                return -EINVAL;
        }
        cfg->downshift_counter = val;

        mutex_lock(&self->fwreq_mutex);
        err = self->aq_fw_ops->set_downshift(self->aq_hw, cfg->downshift_counter);
        mutex_unlock(&self->fwreq_mutex);

        return err;
}

int aq_nic_set_media_detect(struct aq_nic_s *self, int val)
{
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
        int err = 0;

        if (!self->aq_fw_ops->set_media_detect)
                return -EOPNOTSUPP;

        if (val > 0 && val != AQ_HW_MEDIA_DETECT_CNT) {
                netdev_err(self->ndev, "EDPD on this device supports only the fixed value of %d\n",
                           AQ_HW_MEDIA_DETECT_CNT);
                return -EINVAL;
        }

        mutex_lock(&self->fwreq_mutex);
        err = self->aq_fw_ops->set_media_detect(self->aq_hw, !!val);
        mutex_unlock(&self->fwreq_mutex);

        /* The timeout (msecs) plays no role here: the configuration is
         * always fixed in the PHY.
         */
        if (!err)
                cfg->is_media_detect = !!val;

        return err;
}
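
/* Illustrative only: how an ethtool set_phy_tunable handler might route
 * ETHTOOL_PHY_DOWNSHIFT and ETHTOOL_PHY_EDPD to the two helpers above
 * (the driver's real dispatch lives in aq_ethtool.c).  The function name
 * is hypothetical.
 *
 *	static int example_set_phy_tunable(struct net_device *ndev,
 *					   const struct ethtool_tunable *tuna,
 *					   const void *data)
 *	{
 *		struct aq_nic_s *nic = netdev_priv(ndev);
 *
 *		switch (tuna->id) {
 *		case ETHTOOL_PHY_DOWNSHIFT:
 *			return aq_nic_set_downshift(nic, *(const u8 *)data);
 *		case ETHTOOL_PHY_EDPD:
 *			return aq_nic_set_media_detect(nic, *(const u16 *)data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */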

int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
        const unsigned int prev_vecs = cfg->vecs;
        bool ndev_running;
        int err = 0;
        int i;

        /* Nothing to do if the configuration is unchanged, or if this is a
         * disable request (tcs == 0) and QoS is already disabled.
         */
        if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
                return 0;

        ndev_running = netif_running(self->ndev);
        if (ndev_running)
                dev_close(self->ndev);

        cfg->tcs = tcs;
        if (cfg->tcs == 0)
                cfg->tcs = 1;
        if (prio_tc_map)
                memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
        else
                /* Spread the eight priorities evenly across the TCs. */
                for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
                        cfg->prio_tc_map[i] = cfg->tcs * i / 8;

        cfg->is_qos = !!tcs;
        cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
        if (!cfg->is_ptp)
                netdev_warn(self->ndev, "%s\n",
                            "PTP is auto disabled due to requested TC count.");

        netdev_set_num_tc(self->ndev, cfg->tcs);

        /* Changing the number of TCs might change the number of vectors */
        aq_nic_cfg_update_num_vecs(self);
        if (prev_vecs != cfg->vecs) {
                err = aq_nic_realloc_vectors(self);
                if (err)
                        goto err_exit;
        }

        if (ndev_running)
                err = dev_open(self->ndev, NULL);

err_exit:
        return err;
}
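
/* Illustrative only: a minimal TC_SETUP_QDISC_MQPRIO dispatch, modelled on
 * the driver's real ndo_setup_tc handler in aq_main.c.  Per-TC shaper
 * rates from the offload request would additionally be passed to
 * aq_nic_setup_tc_{max,min}_rate() below.  The function name is
 * hypothetical.
 *
 *	static int example_setup_tc(struct net_device *ndev,
 *				    enum tc_setup_type type, void *type_data)
 *	{
 *		struct tc_mqprio_qopt_offload *mqprio = type_data;
 *		struct aq_nic_s *nic = netdev_priv(ndev);
 *
 *		if (type != TC_SETUP_QDISC_MQPRIO)
 *			return -EOPNOTSUPP;
 *
 *		return aq_nic_setup_tc_mqprio(nic, mqprio->qopt.num_tc,
 *					      mqprio->qopt.prio_tc_map);
 *	}
 */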

int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
                             const u32 max_rate)
{
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

        if (tc >= AQ_CFG_TCS_MAX)
                return -EINVAL;

        if (max_rate && max_rate < 10) {
                netdev_warn(self->ndev,
                        "Setting %s to the minimum usable value of %dMbps.\n",
                        "max rate", 10);
                cfg->tc_max_rate[tc] = 10;
        } else {
                cfg->tc_max_rate[tc] = max_rate;
        }

        return 0;
}

int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
                             const u32 min_rate)
{
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

        if (tc >= AQ_CFG_TCS_MAX)
                return -EINVAL;

        if (min_rate)
                set_bit(tc, &cfg->tc_min_rate_msk);
        else
                clear_bit(tc, &cfg->tc_min_rate_msk);

        if (min_rate && min_rate < 20) {
                netdev_warn(self->ndev,
                        "Setting %s to the minimum usable value of %dMbps.\n",
                        "min rate", 20);
                cfg->tc_min_rate[tc] = 20;
        } else {
                cfg->tc_min_rate[tc] = min_rate;
        }

        return 0;
}
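
/* Illustrative only: applying per-TC shaper limits, assuming rates are
 * given in Mbit/s as both helpers above expect.  Requests below the
 * usable minimum (10 Mbps for max rate, 20 Mbps for min rate) are
 * clamped upward with a warning, not rejected.
 *
 *	aq_nic_setup_tc_max_rate(nic, 0, 1000);	// cap TC0 at 1 Gbit/s
 *	aq_nic_setup_tc_min_rate(nic, 1, 5);	// clamped up to 20 Mbit/s
 */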