1 /* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/* Header file for Gigabit Ethernet driver for Mellanox BlueField SoC
 * - this file contains software data structures and any chip-specific
 * data structures (e.g. TX WQE format) that are memory resident.
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */
10 #ifndef __MLXBF_GIGE_H__
11 #define __MLXBF_GIGE_H__
13 #include <linux/io-64-nonatomic-lo-hi.h>
14 #include <linux/irqreturn.h>
15 #include <linux/netdevice.h>
16 #include <linux/irq.h>
/* The silicon design supports a maximum RX ring size of
 * 32K entries. Based on current testing this maximum size
 * is not required to be supported. Instead the RX ring
 * will be capped at a realistic value of 1024 entries.
 */
#define MLXBF_GIGE_MIN_RXQ_SZ 32	/* lower bound on RX ring entries */
#define MLXBF_GIGE_MAX_RXQ_SZ 1024	/* upper bound on RX ring entries */
#define MLXBF_GIGE_DEFAULT_RXQ_SZ 128	/* RX ring size used by default */
/* TX ring size limits and default */
#define MLXBF_GIGE_MIN_TXQ_SZ 4	/* lower bound on TX ring entries */
#define MLXBF_GIGE_MAX_TXQ_SZ 256	/* upper bound on TX ring entries */
#define MLXBF_GIGE_DEFAULT_TXQ_SZ 128	/* TX ring size used by default */

/* Size of each receive buffer, in bytes */
#define MLXBF_GIGE_DEFAULT_BUF_SZ 2048

/* DMA page size and its log2, i.e. 4096 == 1 << 12 */
#define MLXBF_GIGE_DMA_PAGE_SZ 4096
#define MLXBF_GIGE_DMA_PAGE_SHIFT 12
/* There are four individual MAC RX filters. Currently
 * two of them are being used: one for the broadcast MAC
 * (index 0) and one for local MAC (index 1)
 */
#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1

/* Define for broadcast MAC literal (all 48 address bits set) */
#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
/* There are three individual interrupts:
 * 1) Errors, "OOB" interrupt line
 * 2) Receive Packet, "OOB_LLU" interrupt line
 * 3) LLU and PLU Events, "OOB_PLU" interrupt line
 */
#define MLXBF_GIGE_ERROR_INTR_IDX 0
#define MLXBF_GIGE_RECEIVE_PKT_INTR_IDX 1
#define MLXBF_GIGE_LLU_PLU_INTR_IDX 2
/* Software-maintained driver statistics counters.
 * NOTE(review): this definition appears truncated in this chunk of the
 * file -- the closing brace of the stats struct, several counter members,
 * and the opening line of the device-context struct that owns the fields
 * from llu_base onward are not visible here. Confirm the member layout
 * against the complete header before relying on it.
 */
struct mlxbf_gige_stats {
	u64 tx_invalid_checksums;
	u64 rx_truncate_errors;
	u64 rx_din_dropped_pkts;
	u64 rx_filter_passed_pkts;	/* packets accepted by a MAC RX filter */
	u64 rx_filter_discard_pkts;	/* packets rejected by the MAC RX filters */
	/* NOTE(review): the fields below look like members of the main
	 * per-device struct (presumably "struct mlxbf_gige"), whose opening
	 * line is missing from this view.
	 */
	void __iomem *llu_base;	/* LLU block MMIO mapping */
	void __iomem *plu_base;	/* PLU block MMIO mapping */
	struct net_device *netdev;
	struct platform_device *pdev;
	void __iomem *mdio_io;	/* MDIO controller MMIO mapping */
	struct mii_bus *mdiobus;
	spinlock_t lock; /* for packet processing indices */
	dma_addr_t tx_wqe_base_dma;	/* DMA address of the TX WQE ring */
	dma_addr_t *rx_wqe_base;	/* CPU pointer to the RX WQE ring */
	dma_addr_t rx_wqe_base_dma;	/* DMA address of the RX WQE ring */
	dma_addr_t rx_cqe_base_dma;	/* DMA address of the RX CQE ring */
	struct sk_buff *rx_skb[MLXBF_GIGE_MAX_RXQ_SZ];	/* one skb per RX ring entry */
	struct sk_buff *tx_skb[MLXBF_GIGE_MAX_TXQ_SZ];	/* one skb per TX ring entry */
	bool promisc_enabled;	/* true while promiscuous mode is active */
	struct napi_struct napi;
	struct mlxbf_gige_stats stats;
/* Rx Work Queue Element definitions */
#define MLXBF_GIGE_RX_WQE_SZ 8

/* Rx Completion Queue Element definitions.
 * A CQE is a single 64-bit word; the masks below pick out its fields.
 */
#define MLXBF_GIGE_RX_CQE_SZ 8
#define MLXBF_GIGE_RX_CQE_PKT_LEN_MASK GENMASK(10, 0)
#define MLXBF_GIGE_RX_CQE_VALID_MASK GENMASK(11, 11)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK GENMASK(15, 12)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR GENMASK(12, 12)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED GENMASK(13, 13)
#define MLXBF_GIGE_RX_CQE_CHKSUM_MASK GENMASK(31, 16)

/* Tx Work Queue Element definitions.
 * A TX WQE is two 64-bit words (16 bytes); the masks below describe
 * fields within the second word.
 */
#define MLXBF_GIGE_TX_WQE_SZ_QWORDS 2
#define MLXBF_GIGE_TX_WQE_SZ 16
#define MLXBF_GIGE_TX_WQE_PKT_LEN_MASK GENMASK(10, 0)
#define MLXBF_GIGE_TX_WQE_UPDATE_MASK GENMASK(31, 31)
#define MLXBF_GIGE_TX_WQE_CHKSUM_LEN_MASK GENMASK(42, 32)
#define MLXBF_GIGE_TX_WQE_CHKSUM_START_MASK GENMASK(55, 48)
#define MLXBF_GIGE_TX_WQE_CHKSUM_OFFSET_MASK GENMASK(63, 56)

/* Macro to return packet length of specified TX WQE.
 * "(tx_wqe_addr) + 1" indexes the second qword of the WQE, where the
 * packet-length field lives; tx_wqe_addr is presumably a u64 pointer --
 * confirm against callers.
 */
#define MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr) \
	(*((tx_wqe_addr) + 1) & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK)

/* Tx Completion Count (size in bytes) */
#define MLXBF_GIGE_TX_CC_SZ 8
/* List of resources in ACPI table.
 * NOTE(review): this enum appears truncated in this chunk -- entries
 * surrounding the two visible here and the closing brace are missing
 * from view; confirm the full resource list against the complete header.
 */
enum mlxbf_gige_res {
	MLXBF_GIGE_RES_MDIO9,
	MLXBF_GIGE_RES_GPIO0,
/* Version of register data returned by mlxbf_gige_get_regs() */
#define MLXBF_GIGE_REGS_VERSION 1

/* MDIO bus registration/teardown and PHY interrupt plumbing */
int mlxbf_gige_mdio_probe(struct platform_device *pdev,
			  struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id);
void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv);

/* MAC RX filter accessors; index is one of the *_MAC_FILTER_IDX values */
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 *dmac);
void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv);
void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv);

/* RX/TX ring setup, teardown, and datapath entry points */
int mlxbf_gige_rx_init(struct mlxbf_gige *priv);
void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv);
int mlxbf_gige_tx_init(struct mlxbf_gige *priv);
void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv);
bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv);
netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev);
/* NOTE(review): a parameter line of this prototype (between map_len and
 * dir) appears to be missing from this view -- confirm the full signature.
 */
struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
				     unsigned int map_len,
				     enum dma_data_direction dir);
int mlxbf_gige_request_irqs(struct mlxbf_gige *priv);
void mlxbf_gige_free_irqs(struct mlxbf_gige *priv);
int mlxbf_gige_poll(struct napi_struct *napi, int budget);
extern const struct ethtool_ops mlxbf_gige_ethtool_ops;
void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv);
177 #endif /* !defined(__MLXBF_GIGE_H__) */