// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_pci_func.c: Definition of PCI functions. */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl2/hw_atl2.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"

static const struct pci_device_id aq_pci_tbl[] = {
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },

        {}
};

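/* Board lookup table: maps a PCI device ID plus hardware revision onto the
 * hw_ops/caps pair used to drive that silicon.  Rows with AQ_HWREV_ANY
 * match any revision of the given device ID; the first matching row wins.
 */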
static const struct aq_board_revision_s hw_atl_boards[] = {
        { AQ_DEVICE_ID_0001,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
        { AQ_DEVICE_ID_D100,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
        { AQ_DEVICE_ID_D107,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
        { AQ_DEVICE_ID_D108,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
        { AQ_DEVICE_ID_D109,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

        { AQ_DEVICE_ID_0001,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_D100,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
        { AQ_DEVICE_ID_D107,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_D108,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_D109,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

        { AQ_DEVICE_ID_AQC100,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
        { AQ_DEVICE_ID_AQC107,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_AQC108,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_AQC109,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
        { AQ_DEVICE_ID_AQC111,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
        { AQ_DEVICE_ID_AQC112,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

        { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
        { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
        { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
        { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
        { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
        { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },

        { AQ_DEVICE_ID_AQC113DEV,       AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113,          AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC114CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
};

MODULE_DEVICE_TABLE(pci, aq_pci_tbl);

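/* Resolve the hw_ops/caps pair for this function from hw_atl_boards,
 * matching on PCI device ID and, where the table is revision-specific,
 * on the PCI revision ID.  Returns -EINVAL for non-aQuantia vendor IDs
 * or unknown devices.
 */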
static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
                                     const struct aq_hw_ops **ops,
                                     const struct aq_hw_caps_s **caps)
{
        int i;

        if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
                if (hw_atl_boards[i].devid == pdev->device &&
                    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
                     hw_atl_boards[i].revision == pdev->revision)) {
                        *ops = hw_atl_boards[i].ops;
                        *caps = hw_atl_boards[i].caps;
                        break;
                }
        }

        if (i == ARRAY_SIZE(hw_atl_boards))
                return -EINVAL;

        return 0;
}

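/* Basic PCI bring-up: prefer a 64-bit DMA mask with a 32-bit fallback,
 * claim the device's regions and enable bus mastering.
 */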
static int aq_pci_func_init(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                err = -ENOSR;
                goto err_exit;
        }

        err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
        if (err < 0)
                goto err_exit;

        pci_set_master(pdev);

        return 0;

err_exit:
        return err;
}

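/* Request the i-th interrupt vector.  With MSI or MSI-X the caller's
 * handler is installed directly; in legacy INTx mode the line is shared
 * and serviced through aq_vec_isr_legacy instead.  On success the vector
 * is recorded in msix_entry_mask and, under MSI-X, the requested affinity
 * hint is applied.
 */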
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
                          char *name, irq_handler_t irq_handler,
                          void *irq_arg, cpumask_t *affinity_mask)
{
        struct pci_dev *pdev = self->pdev;
        int err;

        if (pdev->msix_enabled || pdev->msi_enabled)
                err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
                                  name, irq_arg);
        else
                err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
                                  IRQF_SHARED, name, irq_arg);

        if (err >= 0) {
                self->msix_entry_mask |= (1 << i);

                if (pdev->msix_enabled && affinity_mask)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i),
                                              affinity_mask);
        }

        return err;
}

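/* Release every vector recorded in msix_entry_mask, clearing the affinity
 * hint first when MSI-X is enabled.  The link/service vector was requested
 * with the nic object as its cookie, the ring vectors with their aq_vec.
 */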
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
        struct pci_dev *pdev = self->pdev;
        unsigned int i;
        void *irq_data;

        for (i = 32U; i--;) {
                if (!((1U << i) & self->msix_entry_mask))
                        continue;
                if (self->aq_nic_cfg.link_irq_vec &&
                    i == self->aq_nic_cfg.link_irq_vec)
                        irq_data = self;
                else if (i < AQ_CFG_VECS_MAX)
                        irq_data = self->aq_vec[i];
                else
                        continue;

                if (pdev->msix_enabled)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
                free_irq(pci_irq_vector(pdev, i), irq_data);
                self->msix_entry_mask &= ~(1U << i);
        }
}

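/* Report which interrupt mode the PCI core actually enabled for this
 * device: MSI-X, MSI or legacy INTx.
 */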
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
        if (self->pdev->msix_enabled)
                return AQ_HW_IRQ_MSIX;
        if (self->pdev->msi_enabled)
                return AQ_HW_IRQ_MSI;

        return AQ_HW_IRQ_LEGACY;
}

static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
        pci_free_irq_vectors(self->pdev);
}

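/* Probe: enable the device, resolve the board's ops/caps, map the first
 * memory BAR, allocate interrupt vectors (ring vectors capped by
 * AQ_CFG_VECS_DEF, the hardware limit and the online CPU count, plus PTP
 * and service vectors) and register the net_device.  The error labels
 * unwind these steps in reverse order.
 */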
static int aq_pci_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pci_id)
{
        struct net_device *ndev;
        resource_size_t mmio_pa;
        struct aq_nic_s *self;
        u32 numvecs;
        u32 bar;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = aq_pci_func_init(pdev);
        if (err)
                goto err_pci_func;

        ndev = aq_ndev_alloc();
        if (!ndev) {
                err = -ENOMEM;
                goto err_ndev;
        }

        self = netdev_priv(ndev);
        self->pdev = pdev;
        SET_NETDEV_DEV(ndev, &pdev->dev);
        pci_set_drvdata(pdev, self);

        mutex_init(&self->fwreq_mutex);

        err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
                                        &aq_nic_get_cfg(self)->aq_hw_caps);
        if (err)
                goto err_ioremap;

        self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
        if (!self->aq_hw) {
                err = -ENOMEM;
                goto err_ioremap;
        }
        self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
        if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
                int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;

                self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
                if (!self->aq_hw->priv) {
                        err = -ENOMEM;
                        goto err_free_aq_hw;
                }
        }

        for (bar = 0; bar < 4; ++bar) {
                if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
                        resource_size_t reg_sz;

                        mmio_pa = pci_resource_start(pdev, bar);
                        if (mmio_pa == 0U) {
                                err = -EIO;
                                goto err_free_aq_hw_priv;
                        }

                        reg_sz = pci_resource_len(pdev, bar);
                        if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
                                err = -EIO;
                                goto err_free_aq_hw_priv;
                        }

                        self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
                        if (!self->aq_hw->mmio) {
                                err = -EIO;
                                goto err_free_aq_hw_priv;
                        }
                        break;
                }
        }

        if (bar == 4) {
                err = -EIO;
                goto err_free_aq_hw_priv;
        }

        numvecs = min((u8)AQ_CFG_VECS_DEF,
                      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
        numvecs = min(numvecs, num_online_cpus());
        /* Request IRQ vector for PTP */
        numvecs += 1;

        numvecs += AQ_HW_SERVICE_IRQS;
        /* enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
        err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
                                    PCI_IRQ_MSIX | PCI_IRQ_MSI |
                                    PCI_IRQ_LEGACY);

        if (err < 0)
                goto err_hwinit;
        numvecs = err;
#endif
        self->irqvecs = numvecs;

        /* net device init */
        aq_nic_cfg_start(self);

        aq_nic_ndev_init(self);

        err = aq_nic_ndev_register(self);
        if (err < 0)
                goto err_register;

        aq_drvinfo_init(ndev);

        return 0;

err_register:
        aq_nic_free_vectors(self);
        aq_pci_free_irq_vectors(self);
err_hwinit:
        iounmap(self->aq_hw->mmio);
err_free_aq_hw_priv:
        kfree(self->aq_hw->priv);
err_free_aq_hw:
        kfree(self->aq_hw);
err_ioremap:
        free_netdev(ndev);
err_ndev:
        pci_release_regions(pdev);
err_pci_func:
        pci_disable_device(pdev);

        return err;
}

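/* Teardown mirrors the probe path: drop rxnfc filter rules, unregister
 * the netdev if registration completed, then free vectors, the MMIO
 * mapping, private data and the PCI resources.
 */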
static void aq_pci_remove(struct pci_dev *pdev)
{
        struct aq_nic_s *self = pci_get_drvdata(pdev);

        if (self->ndev) {
                aq_clear_rxnfc_all_rules(self);
                if (self->ndev->reg_state == NETREG_REGISTERED)
                        unregister_netdev(self->ndev);

#if IS_ENABLED(CONFIG_MACSEC)
                aq_macsec_free(self);
#endif
                aq_nic_free_vectors(self);
                aq_pci_free_irq_vectors(self);
                iounmap(self->aq_hw->mmio);
                kfree(self->aq_hw->priv);
                kfree(self->aq_hw);
                pci_release_regions(pdev);
                free_netdev(self->ndev);
        }

        pci_disable_device(pdev);
}

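/* Shutdown hook: quiesce the NIC and, when the system is powering off,
 * leave the device in D3hot with wake disabled.
 */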
static void aq_pci_shutdown(struct pci_dev *pdev)
{
        struct aq_nic_s *self = pci_get_drvdata(pdev);

        aq_nic_shutdown(self);

        pci_disable_device(pdev);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

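/* Shared suspend path, taken under rtnl.  The interface is detached and
 * its queues stopped; a deep transition (suspend/poweroff) additionally
 * deinitializes the hardware and drops it into its low-power/WoL state,
 * while freeze only stops traffic.
 */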
static int aq_suspend_common(struct device *dev, bool deep)
{
        struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));

        rtnl_lock();

        nic->power_state = AQ_HW_POWER_STATE_D3;
        netif_device_detach(nic->ndev);
        netif_tx_stop_all_queues(nic->ndev);

        if (netif_running(nic->ndev))
                aq_nic_stop(nic);

        if (deep) {
                aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
                aq_nic_set_power(nic);
        }

        rtnl_unlock();

        return 0;
}

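/* Shared resume path: restore PCI state, reinitialize the hardware for
 * deep transitions, restart the datapath if the interface was running and
 * reattach the netdev.  Any failure deinitializes the NIC again.
 */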
static int atl_resume_common(struct device *dev, bool deep)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct aq_nic_s *nic;
        int ret = 0;

        nic = pci_get_drvdata(pdev);

        rtnl_lock();

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        if (deep) {
                /* Reinitialize Nic/Vecs objects */
                aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);

                ret = aq_nic_init(nic);
                if (ret)
                        goto err_exit;
        }

        if (netif_running(nic->ndev)) {
                ret = aq_nic_start(nic);
                if (ret)
                        goto err_exit;
        }

        netif_device_attach(nic->ndev);
        netif_tx_start_all_queues(nic->ndev);

err_exit:
        if (ret < 0)
                aq_nic_deinit(nic, true);

        rtnl_unlock();

        return ret;
}

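/* dev_pm_ops glue: freeze/thaw use the shallow suspend/resume variants,
 * while suspend/poweroff and resume/restore take the deep paths that power
 * the hardware down and bring it back up.
 */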
static int aq_pm_freeze(struct device *dev)
{
        return aq_suspend_common(dev, false);
}

static int aq_pm_suspend_poweroff(struct device *dev)
{
        return aq_suspend_common(dev, true);
}

static int aq_pm_thaw(struct device *dev)
{
        return atl_resume_common(dev, false);
}

static int aq_pm_resume_restore(struct device *dev)
{
        return atl_resume_common(dev, true);
}

static const struct dev_pm_ops aq_pm_ops = {
        .suspend = aq_pm_suspend_poweroff,
        .poweroff = aq_pm_suspend_poweroff,
        .freeze = aq_pm_freeze,
        .resume = aq_pm_resume_restore,
        .restore = aq_pm_resume_restore,
        .thaw = aq_pm_thaw,
};

static struct pci_driver aq_pci_ops = {
        .name = AQ_CFG_DRV_NAME,
        .id_table = aq_pci_tbl,
        .probe = aq_pci_probe,
        .remove = aq_pci_remove,
        .shutdown = aq_pci_shutdown,
#ifdef CONFIG_PM
        .driver.pm = &aq_pm_ops,
#endif
};

int aq_pci_func_register_driver(void)
{
        return pci_register_driver(&aq_pci_ops);
}

void aq_pci_func_unregister_driver(void)
{
        pci_unregister_driver(&aq_pci_ops);
}

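/*
 * Usage note (illustrative only, not part of this file): the two helpers
 * above are meant to be called exactly once from the driver's module
 * init/exit path; in this driver that wiring lives in the main module
 * file (aq_main.c).  A minimal sketch of such a hookup, using hypothetical
 * function names, would look like:
 *
 *      static int __init aq_example_init(void)
 *      {
 *              return aq_pci_func_register_driver();
 *      }
 *
 *      static void __exit aq_example_exit(void)
 *      {
 *              aq_pci_func_unregister_driver();
 *      }
 *
 *      module_init(aq_example_init);
 *      module_exit(aq_example_exit);
 */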