drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause

/* Gigabit Ethernet driver for Mellanox BlueField SoC
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"

#define DRV_NAME    "mlxbf_gige"

/* Allocate an SKB whose payload pointer satisfies the BlueField
 * hardware DMA limitation, i.e. a DMA operation can't cross
 * a 4KB boundary.  A maximum packet size of 2KB is assumed in the
 * alignment formula.  The alignment logic overallocates an SKB,
 * and then adjusts the headroom so that the SKB data pointer is
 * naturally aligned to a 2KB boundary.
 */
struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
                                     unsigned int map_len,
                                     dma_addr_t *buf_dma,
                                     enum dma_data_direction dir)
{
        struct sk_buff *skb;
        u64 addr, offset;

        /* Overallocate the SKB so that any headroom adjustment (to
         * provide 2KB natural alignment) does not exceed the payload area
         */
        skb = netdev_alloc_skb(priv->netdev, MLXBF_GIGE_DEFAULT_BUF_SZ * 2);
        if (!skb)
                return NULL;

        /* Adjust the headroom so that skb->data is naturally aligned to
         * a 2KB boundary, which is the maximum packet size supported.
         */
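        /* For illustration (assuming MLXBF_GIGE_DEFAULT_BUF_SZ is 2KB, i.e.
         * 0x800, per the comment above): skb->data at ...0x1340 is rounded
         * up to ...0x1800 below, so 0x4c0 bytes become headroom.  A 2KB
         * buffer starting on a 2KB boundary can never cross a 4KB boundary.
         */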
        addr = (long)skb->data;
        offset = (addr + MLXBF_GIGE_DEFAULT_BUF_SZ - 1) &
                ~(MLXBF_GIGE_DEFAULT_BUF_SZ - 1);
        offset -= addr;
        if (offset)
                skb_reserve(skb, offset);

        /* Return streaming DMA mapping to caller */
        *buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
        if (dma_mapping_error(priv->dev, *buf_dma)) {
                dev_kfree_skb(skb);
                *buf_dma = (dma_addr_t)0;
                return NULL;
        }

        return skb;
}

static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
{
        u8 mac[ETH_ALEN];
        u64 local_mac;

        memset(mac, 0, ETH_ALEN);
        mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
                                     &local_mac);
        u64_to_ether_addr(local_mac, mac);

        if (is_valid_ether_addr(mac)) {
                ether_addr_copy(priv->netdev->dev_addr, mac);
        } else {
                /* Provide a random MAC if for some reason the device has
                 * not been configured with a valid MAC address already.
                 */
                eth_hw_addr_random(priv->netdev);
        }

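        /* Write whichever address was chosen back to the local MAC RX
         * filter so the hardware filter and the netdev address stay in sync.
         */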
        local_mac = ether_addr_to_u64(priv->netdev->dev_addr);
        mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
                                     local_mac);
}

static void mlxbf_gige_cache_stats(struct mlxbf_gige *priv)
{
        struct mlxbf_gige_stats *p;

        /* Cache stats that will be cleared by clean port operation */
        p = &priv->stats;
        p->rx_din_dropped_pkts += readq(priv->base +
                                        MLXBF_GIGE_RX_DIN_DROP_COUNTER);
        p->rx_filter_passed_pkts += readq(priv->base +
                                          MLXBF_GIGE_RX_PASS_COUNTER_ALL);
        p->rx_filter_discard_pkts += readq(priv->base +
                                           MLXBF_GIGE_RX_DISC_COUNTER_ALL);
}

static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
{
        u64 control;
        u64 temp;
        int err;

        /* Set the CLEAN_PORT_EN bit to trigger SW reset */
        control = readq(priv->base + MLXBF_GIGE_CONTROL);
        control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
        writeq(control, priv->base + MLXBF_GIGE_CONTROL);

        /* Ensure completion of "clean port" write before polling status */
        mb();

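        /* Poll for the READY status bit.  readq_poll_timeout_atomic()
         * busy-waits in 100 us steps for up to 100 ms and returns
         * -ETIMEDOUT if the bit never sets.
         */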
        err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
                                        (temp & MLXBF_GIGE_STATUS_READY),
                                        100, 100000);

        /* Clear the CLEAN_PORT_EN bit at end of this loop */
        control = readq(priv->base + MLXBF_GIGE_CONTROL);
        control &= ~MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
        writeq(control, priv->base + MLXBF_GIGE_CONTROL);

        return err;
}

static int mlxbf_gige_open(struct net_device *netdev)
{
        struct mlxbf_gige *priv = netdev_priv(netdev);
        struct phy_device *phydev = netdev->phydev;
        u64 int_en;
        int err;

        err = mlxbf_gige_request_irqs(priv);
        if (err)
                return err;
        mlxbf_gige_cache_stats(priv);
        err = mlxbf_gige_clean_port(priv);
        if (err)
                goto free_irqs;

        /* Clear driver's valid_polarity to match hardware,
         * since the above call to clean_port() resets the
         * receive polarity used by hardware.
         */
        priv->valid_polarity = 0;

        err = mlxbf_gige_rx_init(priv);
        if (err)
                goto free_irqs;
        err = mlxbf_gige_tx_init(priv);
        if (err)
                goto rx_deinit;

        phy_start(phydev);

        netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
        napi_enable(&priv->napi);
        netif_start_queue(netdev);

        /* Set bits in INT_EN that we care about */
        int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
                 MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
                 MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE |
                 MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE |
                 MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
                 MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
                 MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;

        /* Ensure completion of all initialization before enabling interrupts */
        mb();

        writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);
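        /* From this point the RX_RECEIVE_PACKET interrupt is live; its
         * handler (elsewhere in the driver) is expected to schedule the
         * NAPI context registered above with mlxbf_gige_poll.
         */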

        return 0;

rx_deinit:
        mlxbf_gige_rx_deinit(priv);

free_irqs:
        mlxbf_gige_free_irqs(priv);
        return err;
}

static int mlxbf_gige_stop(struct net_device *netdev)
{
        struct mlxbf_gige *priv = netdev_priv(netdev);

        writeq(0, priv->base + MLXBF_GIGE_INT_EN);
        netif_stop_queue(netdev);
        napi_disable(&priv->napi);
        netif_napi_del(&priv->napi);
        mlxbf_gige_free_irqs(priv);

        phy_stop(netdev->phydev);

        mlxbf_gige_rx_deinit(priv);
        mlxbf_gige_tx_deinit(priv);
        mlxbf_gige_cache_stats(priv);
        mlxbf_gige_clean_port(priv);

        return 0;
}

static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
                               struct ifreq *ifr, int cmd)
{
        if (!(netif_running(netdev)))
                return -EINVAL;

        return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
{
        struct mlxbf_gige *priv = netdev_priv(netdev);
        bool new_promisc_enabled;

        new_promisc_enabled = netdev->flags & IFF_PROMISC;

        /* Only write to the hardware registers if the new setting
         * of promiscuous mode is different from the current one.
         */
        if (new_promisc_enabled != priv->promisc_enabled) {
                priv->promisc_enabled = new_promisc_enabled;

                if (new_promisc_enabled)
                        mlxbf_gige_enable_promisc(priv);
                else
                        mlxbf_gige_disable_promisc(priv);
        }
}

static void mlxbf_gige_get_stats64(struct net_device *netdev,
                                   struct rtnl_link_stats64 *stats)
{
        struct mlxbf_gige *priv = netdev_priv(netdev);

        netdev_stats_to_stats64(stats, &netdev->stats);

        stats->rx_length_errors = priv->stats.rx_truncate_errors;
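        /* The DIN drop counter in hardware is cleared by the clean-port
         * reset, so add the live register value to the running total
         * cached by mlxbf_gige_cache_stats().
         */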
        stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
                                readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
        stats->rx_crc_errors = priv->stats.rx_mac_errors;
        stats->rx_errors = stats->rx_length_errors +
                           stats->rx_fifo_errors +
                           stats->rx_crc_errors;

        stats->tx_fifo_errors = priv->stats.tx_fifo_full;
        stats->tx_errors = stats->tx_fifo_errors;
}

static const struct net_device_ops mlxbf_gige_netdev_ops = {
        .ndo_open               = mlxbf_gige_open,
        .ndo_stop               = mlxbf_gige_stop,
        .ndo_start_xmit         = mlxbf_gige_start_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_eth_ioctl          = mlxbf_gige_eth_ioctl,
        .ndo_set_rx_mode        = mlxbf_gige_set_rx_mode,
        .ndo_get_stats64        = mlxbf_gige_get_stats64,
};

static void mlxbf_gige_adjust_link(struct net_device *netdev)
{
        struct phy_device *phydev = netdev->phydev;

        phy_print_status(phydev);
}

static int mlxbf_gige_probe(struct platform_device *pdev)
{
        struct phy_device *phydev;
        struct net_device *netdev;
        struct mlxbf_gige *priv;
        void __iomem *llu_base;
        void __iomem *plu_base;
        void __iomem *base;
        u64 control;
        int addr;
        int err;

        base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
        if (IS_ERR(base))
                return PTR_ERR(base);

        llu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_LLU);
        if (IS_ERR(llu_base))
                return PTR_ERR(llu_base);

        plu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_PLU);
        if (IS_ERR(plu_base))
                return PTR_ERR(plu_base);

        /* Perform general init of GigE block */
        control = readq(base + MLXBF_GIGE_CONTROL);
        control |= MLXBF_GIGE_CONTROL_PORT_EN;
        writeq(control, base + MLXBF_GIGE_CONTROL);

        netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
        if (!netdev)
                return -ENOMEM;

        SET_NETDEV_DEV(netdev, &pdev->dev);
        netdev->netdev_ops = &mlxbf_gige_netdev_ops;
        netdev->ethtool_ops = &mlxbf_gige_ethtool_ops;
        priv = netdev_priv(netdev);
        priv->netdev = netdev;

        platform_set_drvdata(pdev, priv);
        priv->dev = &pdev->dev;
        priv->pdev = pdev;

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->gpio_lock);

        /* Attach MDIO device */
        err = mlxbf_gige_mdio_probe(pdev, priv);
        if (err)
                return err;

        err = mlxbf_gige_gpio_init(pdev, priv);
        if (err) {
                dev_err(&pdev->dev, "PHY IRQ initialization failed\n");
                mlxbf_gige_mdio_remove(priv);
                return -ENODEV;
        }

        priv->base = base;
        priv->llu_base = llu_base;
        priv->plu_base = plu_base;

        priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
        priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;

        /* Write initial MAC address to hardware */
        mlxbf_gige_initial_mac(priv);

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
                goto out;
        }

        priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
        priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
        priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);

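        /* Use the single PHY expected on the MDIO bus.  Its interrupt is
         * the GPIO-based line set up by mlxbf_gige_gpio_init() above, so
         * phylib should not need to poll for link changes.
         */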
        phydev = phy_find_first(priv->mdiobus);
        if (!phydev) {
                err = -ENODEV;
                goto out;
        }

        addr = phydev->mdio.addr;
        priv->mdiobus->irq[addr] = priv->phy_irq;
        phydev->irq = priv->phy_irq;

        err = phy_connect_direct(netdev, phydev,
                                 mlxbf_gige_adjust_link,
                                 PHY_INTERFACE_MODE_GMII);
        if (err) {
                dev_err(&pdev->dev, "Could not attach to PHY\n");
                goto out;
        }

        /* MAC only supports 1000T full duplex mode */
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);

        /* Only symmetric pause with flow control enabled is supported so no
         * need to negotiate pause.
         */
        linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);

        /* Display information about attached PHY device */
        phy_attached_info(phydev);

        err = register_netdev(netdev);
        if (err) {
                dev_err(&pdev->dev, "Failed to register netdev\n");
                phy_disconnect(phydev);
                goto out;
        }

        return 0;

out:
        mlxbf_gige_gpio_free(priv);
        mlxbf_gige_mdio_remove(priv);
        return err;
}

static int mlxbf_gige_remove(struct platform_device *pdev)
{
        struct mlxbf_gige *priv = platform_get_drvdata(pdev);

        unregister_netdev(priv->netdev);
        phy_disconnect(priv->netdev->phydev);
        mlxbf_gige_gpio_free(priv);
        mlxbf_gige_mdio_remove(priv);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static void mlxbf_gige_shutdown(struct platform_device *pdev)
{
        struct mlxbf_gige *priv = platform_get_drvdata(pdev);

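        /* Quiesce the hardware: mask all interrupts and reset the port. */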
        writeq(0, priv->base + MLXBF_GIGE_INT_EN);
        mlxbf_gige_clean_port(priv);
}

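/* The GigE block is enumerated via ACPI; probe matches on the
 * "MLNXBF17" device ID below.
 */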
static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
        { "MLNXBF17", 0 },
        {},
};
MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);

static struct platform_driver mlxbf_gige_driver = {
        .probe = mlxbf_gige_probe,
        .remove = mlxbf_gige_remove,
        .shutdown = mlxbf_gige_shutdown,
        .driver = {
                .name = DRV_NAME,
                .acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
        },
};

module_platform_driver(mlxbf_gige_driver);

MODULE_DESCRIPTION("Mellanox BlueField SoC Gigabit Ethernet Driver");
MODULE_AUTHOR("David Thompson <davthompson@nvidia.com>");
MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
MODULE_LICENSE("Dual BSD/GPL");