// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause

/* Gigabit Ethernet driver for Mellanox BlueField SoC
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */
8 #include <linux/acpi.h>
9 #include <linux/device.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/iopoll.h>
14 #include <linux/module.h>
15 #include <linux/phy.h>
16 #include <linux/platform_device.h>
17 #include <linux/skbuff.h>
19 #include "mlxbf_gige.h"
20 #include "mlxbf_gige_regs.h"
22 #define DRV_NAME "mlxbf_gige"
24 /* Allocate SKB whose payload pointer aligns with the Bluefield
25 * hardware DMA limitation, i.e. DMA operation can't cross
26 * a 4KB boundary. A maximum packet size of 2KB is assumed in the
27 * alignment formula. The alignment logic overallocates an SKB,
28 * and then adjusts the headroom so that the SKB data pointer is
29 * naturally aligned to a 2KB boundary.
31 struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
34 enum dma_data_direction dir)
39 /* Overallocate the SKB so that any headroom adjustment (to
40 * provide 2KB natural alignment) does not exceed payload area
42 skb = netdev_alloc_skb(priv->netdev, MLXBF_GIGE_DEFAULT_BUF_SZ * 2);
46 /* Adjust the headroom so that skb->data is naturally aligned to
47 * a 2KB boundary, which is the maximum packet size supported.
49 addr = (long)skb->data;
50 offset = (addr + MLXBF_GIGE_DEFAULT_BUF_SZ - 1) &
51 ~(MLXBF_GIGE_DEFAULT_BUF_SZ - 1);
54 skb_reserve(skb, offset);
56 /* Return streaming DMA mapping to caller */
57 *buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
58 if (dma_mapping_error(priv->dev, *buf_dma)) {
60 *buf_dma = (dma_addr_t)0;
67 static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
72 memset(mac, 0, ETH_ALEN);
73 mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
75 u64_to_ether_addr(local_mac, mac);
77 if (is_valid_ether_addr(mac)) {
78 ether_addr_copy(priv->netdev->dev_addr, mac);
80 /* Provide a random MAC if for some reason the device has
81 * not been configured with a valid MAC address already.
83 eth_hw_addr_random(priv->netdev);
86 local_mac = ether_addr_to_u64(priv->netdev->dev_addr);
87 mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
91 static void mlxbf_gige_cache_stats(struct mlxbf_gige *priv)
93 struct mlxbf_gige_stats *p;
95 /* Cache stats that will be cleared by clean port operation */
97 p->rx_din_dropped_pkts += readq(priv->base +
98 MLXBF_GIGE_RX_DIN_DROP_COUNTER);
99 p->rx_filter_passed_pkts += readq(priv->base +
100 MLXBF_GIGE_RX_PASS_COUNTER_ALL);
101 p->rx_filter_discard_pkts += readq(priv->base +
102 MLXBF_GIGE_RX_DISC_COUNTER_ALL);
105 static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
111 /* Set the CLEAN_PORT_EN bit to trigger SW reset */
112 control = readq(priv->base + MLXBF_GIGE_CONTROL);
113 control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
114 writeq(control, priv->base + MLXBF_GIGE_CONTROL);
116 /* Ensure completion of "clean port" write before polling status */
119 err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
120 (temp & MLXBF_GIGE_STATUS_READY),
123 /* Clear the CLEAN_PORT_EN bit at end of this loop */
124 control = readq(priv->base + MLXBF_GIGE_CONTROL);
125 control &= ~MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
126 writeq(control, priv->base + MLXBF_GIGE_CONTROL);
131 static int mlxbf_gige_open(struct net_device *netdev)
133 struct mlxbf_gige *priv = netdev_priv(netdev);
134 struct phy_device *phydev = netdev->phydev;
138 err = mlxbf_gige_request_irqs(priv);
141 mlxbf_gige_cache_stats(priv);
142 err = mlxbf_gige_clean_port(priv);
146 /* Clear driver's valid_polarity to match hardware,
147 * since the above call to clean_port() resets the
148 * receive polarity used by hardware.
150 priv->valid_polarity = 0;
152 err = mlxbf_gige_rx_init(priv);
155 err = mlxbf_gige_tx_init(priv);
161 netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
162 napi_enable(&priv->napi);
163 netif_start_queue(netdev);
165 /* Set bits in INT_EN that we care about */
166 int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
167 MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
168 MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE |
169 MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE |
170 MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
171 MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
172 MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;
174 /* Ensure completion of all initialization before enabling interrupts */
177 writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);
182 mlxbf_gige_rx_deinit(priv);
185 mlxbf_gige_free_irqs(priv);
189 static int mlxbf_gige_stop(struct net_device *netdev)
191 struct mlxbf_gige *priv = netdev_priv(netdev);
193 writeq(0, priv->base + MLXBF_GIGE_INT_EN);
194 netif_stop_queue(netdev);
195 napi_disable(&priv->napi);
196 netif_napi_del(&priv->napi);
197 mlxbf_gige_free_irqs(priv);
199 phy_stop(netdev->phydev);
201 mlxbf_gige_rx_deinit(priv);
202 mlxbf_gige_tx_deinit(priv);
203 mlxbf_gige_cache_stats(priv);
204 mlxbf_gige_clean_port(priv);
209 static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
210 struct ifreq *ifr, int cmd)
212 if (!(netif_running(netdev)))
215 return phy_mii_ioctl(netdev->phydev, ifr, cmd);
218 static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
220 struct mlxbf_gige *priv = netdev_priv(netdev);
221 bool new_promisc_enabled;
223 new_promisc_enabled = netdev->flags & IFF_PROMISC;
225 /* Only write to the hardware registers if the new setting
226 * of promiscuous mode is different from the current one.
228 if (new_promisc_enabled != priv->promisc_enabled) {
229 priv->promisc_enabled = new_promisc_enabled;
231 if (new_promisc_enabled)
232 mlxbf_gige_enable_promisc(priv);
234 mlxbf_gige_disable_promisc(priv);
238 static void mlxbf_gige_get_stats64(struct net_device *netdev,
239 struct rtnl_link_stats64 *stats)
241 struct mlxbf_gige *priv = netdev_priv(netdev);
243 netdev_stats_to_stats64(stats, &netdev->stats);
245 stats->rx_length_errors = priv->stats.rx_truncate_errors;
246 stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
247 readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
248 stats->rx_crc_errors = priv->stats.rx_mac_errors;
249 stats->rx_errors = stats->rx_length_errors +
250 stats->rx_fifo_errors +
251 stats->rx_crc_errors;
253 stats->tx_fifo_errors = priv->stats.tx_fifo_full;
254 stats->tx_errors = stats->tx_fifo_errors;
257 static const struct net_device_ops mlxbf_gige_netdev_ops = {
258 .ndo_open = mlxbf_gige_open,
259 .ndo_stop = mlxbf_gige_stop,
260 .ndo_start_xmit = mlxbf_gige_start_xmit,
261 .ndo_set_mac_address = eth_mac_addr,
262 .ndo_validate_addr = eth_validate_addr,
263 .ndo_eth_ioctl = mlxbf_gige_eth_ioctl,
264 .ndo_set_rx_mode = mlxbf_gige_set_rx_mode,
265 .ndo_get_stats64 = mlxbf_gige_get_stats64,
268 static void mlxbf_gige_adjust_link(struct net_device *netdev)
270 struct phy_device *phydev = netdev->phydev;
272 phy_print_status(phydev);
275 static int mlxbf_gige_probe(struct platform_device *pdev)
277 struct phy_device *phydev;
278 struct net_device *netdev;
279 struct mlxbf_gige *priv;
280 void __iomem *llu_base;
281 void __iomem *plu_base;
287 base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
289 return PTR_ERR(base);
291 llu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_LLU);
292 if (IS_ERR(llu_base))
293 return PTR_ERR(llu_base);
295 plu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_PLU);
296 if (IS_ERR(plu_base))
297 return PTR_ERR(plu_base);
299 /* Perform general init of GigE block */
300 control = readq(base + MLXBF_GIGE_CONTROL);
301 control |= MLXBF_GIGE_CONTROL_PORT_EN;
302 writeq(control, base + MLXBF_GIGE_CONTROL);
304 netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
308 SET_NETDEV_DEV(netdev, &pdev->dev);
309 netdev->netdev_ops = &mlxbf_gige_netdev_ops;
310 netdev->ethtool_ops = &mlxbf_gige_ethtool_ops;
311 priv = netdev_priv(netdev);
312 priv->netdev = netdev;
314 platform_set_drvdata(pdev, priv);
315 priv->dev = &pdev->dev;
318 spin_lock_init(&priv->lock);
319 spin_lock_init(&priv->gpio_lock);
321 /* Attach MDIO device */
322 err = mlxbf_gige_mdio_probe(pdev, priv);
326 err = mlxbf_gige_gpio_init(pdev, priv);
328 dev_err(&pdev->dev, "PHY IRQ initialization failed\n");
329 mlxbf_gige_mdio_remove(priv);
334 priv->llu_base = llu_base;
335 priv->plu_base = plu_base;
337 priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
338 priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
340 /* Write initial MAC address to hardware */
341 mlxbf_gige_initial_mac(priv);
343 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
345 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
349 priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
350 priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
351 priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
353 phydev = phy_find_first(priv->mdiobus);
359 addr = phydev->mdio.addr;
360 priv->mdiobus->irq[addr] = priv->phy_irq;
361 phydev->irq = priv->phy_irq;
363 err = phy_connect_direct(netdev, phydev,
364 mlxbf_gige_adjust_link,
365 PHY_INTERFACE_MODE_GMII);
367 dev_err(&pdev->dev, "Could not attach to PHY\n");
371 /* MAC only supports 1000T full duplex mode */
372 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
373 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
374 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
375 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
376 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
378 /* Only symmetric pause with flow control enabled is supported so no
379 * need to negotiate pause.
381 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
382 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
384 /* Display information about attached PHY device */
385 phy_attached_info(phydev);
387 err = register_netdev(netdev);
389 dev_err(&pdev->dev, "Failed to register netdev\n");
390 phy_disconnect(phydev);
397 mlxbf_gige_gpio_free(priv);
398 mlxbf_gige_mdio_remove(priv);
402 static int mlxbf_gige_remove(struct platform_device *pdev)
404 struct mlxbf_gige *priv = platform_get_drvdata(pdev);
406 unregister_netdev(priv->netdev);
407 phy_disconnect(priv->netdev->phydev);
408 mlxbf_gige_gpio_free(priv);
409 mlxbf_gige_mdio_remove(priv);
410 platform_set_drvdata(pdev, NULL);
415 static void mlxbf_gige_shutdown(struct platform_device *pdev)
417 struct mlxbf_gige *priv = platform_get_drvdata(pdev);
419 writeq(0, priv->base + MLXBF_GIGE_INT_EN);
420 mlxbf_gige_clean_port(priv);
423 static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
427 MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
429 static struct platform_driver mlxbf_gige_driver = {
430 .probe = mlxbf_gige_probe,
431 .remove = mlxbf_gige_remove,
432 .shutdown = mlxbf_gige_shutdown,
435 .acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
439 module_platform_driver(mlxbf_gige_driver);
441 MODULE_DESCRIPTION("Mellanox BlueField SoC Gigabit Ethernet Driver");
442 MODULE_AUTHOR("David Thompson <davthompson@nvidia.com>");
443 MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
444 MODULE_LICENSE("Dual BSD/GPL");