net: stmmac: replace the has_sun8i field with a flag
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (platform/kernel/linux-rpi.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52
53 /* As long as the interface is active, we keep the timestamping counter enabled
54  * with fine resolution and binary rollover. This avoid non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58                                  PTP_TCR_TSCTRLSSR)
59
60 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
62
63 /* Module parameters */
64 #define TX_TIMEO        5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76
77 #define STMMAC_TX_THRESH(x)     ((x)->dma_conf.dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)     ((x)->dma_conf.dma_rx_size / 4)
79
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX        256
82 #define STMMAC_TX_XSK_AVAIL             16
83 #define STMMAC_RX_FILL_BATCH            16
84
85 #define STMMAC_XDP_PASS         0
86 #define STMMAC_XDP_CONSUMED     BIT(0)
87 #define STMMAC_XDP_TX           BIT(1)
88 #define STMMAC_XDP_REDIRECT     BIT(2)
89
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102
103 #define DEFAULT_BUFSIZE 1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107
108 #define STMMAC_RX_COPYBREAK     256
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
112                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER        1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but allows the user to force use of chain mode instead of ring mode
122  */
123 static int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139                                           u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151         int ret = 0;
152
153         if (enabled) {
154                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155                 if (ret)
156                         return ret;
157                 ret = clk_prepare_enable(priv->plat->pclk);
158                 if (ret) {
159                         clk_disable_unprepare(priv->plat->stmmac_clk);
160                         return ret;
161                 }
162                 if (priv->plat->clks_config) {
163                         ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164                         if (ret) {
165                                 clk_disable_unprepare(priv->plat->stmmac_clk);
166                                 clk_disable_unprepare(priv->plat->pclk);
167                                 return ret;
168                         }
169                 }
170         } else {
171                 clk_disable_unprepare(priv->plat->stmmac_clk);
172                 clk_disable_unprepare(priv->plat->pclk);
173                 if (priv->plat->clks_config)
174                         priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175         }
176
177         return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
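/*
 * Editor's note: a minimal usage sketch (not part of this file) showing how a
 * caller is expected to pair the two halves of stmmac_bus_clks_config(). The
 * surrounding runtime-PM context and the helper name are assumptions.
 */
#if 0	/* illustrative only */
static int example_runtime_resume(struct stmmac_priv *priv)
{
	int ret;

	/* enables stmmac_clk, pclk and any bsp-specific clocks, in order */
	ret = stmmac_bus_clks_config(priv, true);
	if (ret)
		return ret;	/* on failure the function has already unwound */

	/* ... register accesses ... */

	/* symmetric disable path */
	stmmac_bus_clks_config(priv, false);
	return 0;
}
#endif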
180
181 /**
182  * stmmac_verify_args - verify the driver parameters.
183  * Description: it checks the driver parameters and sets a default in case of
184  * errors.
185  */
186 static void stmmac_verify_args(void)
187 {
188         if (unlikely(watchdog < 0))
189                 watchdog = TX_TIMEO;
190         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
191                 buf_sz = DEFAULT_BUFSIZE;
192         if (unlikely(flow_ctrl > 1))
193                 flow_ctrl = FLOW_AUTO;
194         else if (likely(flow_ctrl < 0))
195                 flow_ctrl = FLOW_OFF;
196         if (unlikely((pause < 0) || (pause > 0xffff)))
197                 pause = PAUSE_TIME;
198         if (eee_timer < 0)
199                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
200 }
201
202 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
203 {
204         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
205         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
207         u32 queue;
208
209         for (queue = 0; queue < maxq; queue++) {
210                 struct stmmac_channel *ch = &priv->channel[queue];
211
212                 if (stmmac_xdp_is_enabled(priv) &&
213                     test_bit(queue, priv->af_xdp_zc_qps)) {
214                         napi_disable(&ch->rxtx_napi);
215                         continue;
216                 }
217
218                 if (queue < rx_queues_cnt)
219                         napi_disable(&ch->rx_napi);
220                 if (queue < tx_queues_cnt)
221                         napi_disable(&ch->tx_napi);
222         }
223 }
224
225 /**
226  * stmmac_disable_all_queues - Disable all queues
227  * @priv: driver private structure
228  */
229 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232         struct stmmac_rx_queue *rx_q;
233         u32 queue;
234
235         /* synchronize_rcu() needed for pending XDP buffers to drain */
236         for (queue = 0; queue < rx_queues_cnt; queue++) {
237                 rx_q = &priv->dma_conf.rx_queue[queue];
238                 if (rx_q->xsk_pool) {
239                         synchronize_rcu();
240                         break;
241                 }
242         }
243
244         __stmmac_disable_all_queues(priv);
245 }
246
247 /**
248  * stmmac_enable_all_queues - Enable all queues
249  * @priv: driver private structure
250  */
251 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
252 {
253         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
254         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
255         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
256         u32 queue;
257
258         for (queue = 0; queue < maxq; queue++) {
259                 struct stmmac_channel *ch = &priv->channel[queue];
260
261                 if (stmmac_xdp_is_enabled(priv) &&
262                     test_bit(queue, priv->af_xdp_zc_qps)) {
263                         napi_enable(&ch->rxtx_napi);
264                         continue;
265                 }
266
267                 if (queue < rx_queues_cnt)
268                         napi_enable(&ch->rx_napi);
269                 if (queue < tx_queues_cnt)
270                         napi_enable(&ch->tx_napi);
271         }
272 }
273
274 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
275 {
276         if (!test_bit(STMMAC_DOWN, &priv->state) &&
277             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
278                 queue_work(priv->wq, &priv->service_task);
279 }
280
281 static void stmmac_global_err(struct stmmac_priv *priv)
282 {
283         netif_carrier_off(priv->dev);
284         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
285         stmmac_service_event_schedule(priv);
286 }
287
288 /**
289  * stmmac_clk_csr_set - dynamically set the MDC clock
290  * @priv: driver private structure
291  * Description: this is to dynamically set the MDC clock according to the csr
292  * clock input.
293  * Note:
294  *      If a specific clk_csr value is passed from the platform
295  *      this means that the CSR Clock Range selection cannot be
296  *      changed at run-time and it is fixed (as reported in the driver
297  * documentation). Otherwise, the driver will try to set the MDC
298  *      clock dynamically according to the actual clock input.
299  */
300 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
301 {
302         u32 clk_rate;
303
304         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
305
306         /* The platform-provided default clk_csr is assumed to be valid
307          * for all cases except the ones mentioned below.
308          * For values higher than the IEEE 802.3 specified frequency we
309          * cannot estimate the proper divider, as the frequency of
310          * clk_csr_i is not known; so we do not change the default
311          * divider.
312          */
313         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
314                 if (clk_rate < CSR_F_35M)
315                         priv->clk_csr = STMMAC_CSR_20_35M;
316                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
317                         priv->clk_csr = STMMAC_CSR_35_60M;
318                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
319                         priv->clk_csr = STMMAC_CSR_60_100M;
320                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
321                         priv->clk_csr = STMMAC_CSR_100_150M;
322                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
323                         priv->clk_csr = STMMAC_CSR_150_250M;
324                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
325                         priv->clk_csr = STMMAC_CSR_250_300M;
326         }
327
328         if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
329                 if (clk_rate > 160000000)
330                         priv->clk_csr = 0x03;
331                 else if (clk_rate > 80000000)
332                         priv->clk_csr = 0x02;
333                 else if (clk_rate > 40000000)
334                         priv->clk_csr = 0x01;
335                 else
336                         priv->clk_csr = 0;
337         }
338
339         if (priv->plat->has_xgmac) {
340                 if (clk_rate > 400000000)
341                         priv->clk_csr = 0x5;
342                 else if (clk_rate > 350000000)
343                         priv->clk_csr = 0x4;
344                 else if (clk_rate > 300000000)
345                         priv->clk_csr = 0x3;
346                 else if (clk_rate > 250000000)
347                         priv->clk_csr = 0x2;
348                 else if (clk_rate > 150000000)
349                         priv->clk_csr = 0x1;
350                 else
351                         priv->clk_csr = 0x0;
352         }
353 }
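/*
 * Editor's note: worked example for the selection above (illustrative values).
 * With no platform-fixed clk_csr and clk_get_rate() returning 125 MHz, the
 * generic table picks STMMAC_CSR_100_150M; on a platform with
 * STMMAC_FLAG_HAS_SUN8I set, the same 125 MHz rate is re-evaluated by the
 * sun8i block (> 80 MHz) and clk_csr ends up as 0x02 instead.
 */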
354
355 static void print_pkt(unsigned char *buf, int len)
356 {
357         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
358         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
359 }
360
361 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
362 {
363         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
364         u32 avail;
365
366         if (tx_q->dirty_tx > tx_q->cur_tx)
367                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
368         else
369                 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
370
371         return avail;
372 }
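/*
 * Editor's note: worked example of the ring arithmetic above (illustrative
 * numbers). With dma_tx_size = 512, cur_tx = 10 and dirty_tx = 5 the else
 * branch applies and avail = 512 - 10 + 5 - 1 = 506. One descriptor is always
 * kept unused so that cur_tx == dirty_tx unambiguously means "ring empty".
 */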
373
374 /**
375  * stmmac_rx_dirty - Get RX queue dirty
376  * @priv: driver private structure
377  * @queue: RX queue index
378  */
379 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
380 {
381         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
382         u32 dirty;
383
384         if (rx_q->dirty_rx <= rx_q->cur_rx)
385                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
386         else
387                 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
388
389         return dirty;
390 }
391
392 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
393 {
394         int tx_lpi_timer;
395
396         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
397         priv->eee_sw_timer_en = en ? 0 : 1;
398         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
399         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
400 }
401
402 /**
403  * stmmac_enable_eee_mode - check and enter LPI mode
404  * @priv: driver private structure
405  * Description: verify that all TX queues have finished their work and,
406  * if so, enter LPI mode when EEE is enabled.
407  */
408 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
409 {
410         u32 tx_cnt = priv->plat->tx_queues_to_use;
411         u32 queue;
412
413         /* check if all TX queues have the work finished */
414         for (queue = 0; queue < tx_cnt; queue++) {
415                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
416
417                 if (tx_q->dirty_tx != tx_q->cur_tx)
418                         return -EBUSY; /* still unfinished work */
419         }
420
421         /* Check and enter in LPI mode */
422         if (!priv->tx_path_in_lpi_mode)
423                 stmmac_set_eee_mode(priv, priv->hw,
424                                 priv->plat->en_tx_lpi_clockgating);
425         return 0;
426 }
427
428 /**
429  * stmmac_disable_eee_mode - disable and exit from LPI mode
430  * @priv: driver private structure
431  * Description: exit and disable EEE when the LPI state is active.
432  * This is called from the xmit path.
433  */
434 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
435 {
436         if (!priv->eee_sw_timer_en) {
437                 stmmac_lpi_entry_timer_config(priv, 0);
438                 return;
439         }
440
441         stmmac_reset_eee_mode(priv, priv->hw);
442         del_timer_sync(&priv->eee_ctrl_timer);
443         priv->tx_path_in_lpi_mode = false;
444 }
445
446 /**
447  * stmmac_eee_ctrl_timer - EEE TX SW timer.
448  * @t:  timer_list struct containing private info
449  * Description:
450  *  if there is no data transfer and if we are not in LPI state,
451  *  then the MAC transmitter can be moved to the LPI state.
452  */
453 static void stmmac_eee_ctrl_timer(struct timer_list *t)
454 {
455         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
456
457         if (stmmac_enable_eee_mode(priv))
458                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
459 }
460
461 /**
462  * stmmac_eee_init - init EEE
463  * @priv: driver private structure
464  * Description:
465  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
466  *  can also manage EEE, this function enables the LPI state and starts
467  *  the related timer.
468  */
469 bool stmmac_eee_init(struct stmmac_priv *priv)
470 {
471         int eee_tw_timer = priv->eee_tw_timer;
472
473         /* When using PCS we cannot access the PHY registers at this stage,
474          * so we do not support extra features like EEE.
475          */
476         if (priv->hw->pcs == STMMAC_PCS_TBI ||
477             priv->hw->pcs == STMMAC_PCS_RTBI)
478                 return false;
479
480         /* Check if MAC core supports the EEE feature. */
481         if (!priv->dma_cap.eee)
482                 return false;
483
484         mutex_lock(&priv->lock);
485
486         /* Check if it needs to be deactivated */
487         if (!priv->eee_active) {
488                 if (priv->eee_enabled) {
489                         netdev_dbg(priv->dev, "disable EEE\n");
490                         stmmac_lpi_entry_timer_config(priv, 0);
491                         del_timer_sync(&priv->eee_ctrl_timer);
492                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
493                         if (priv->hw->xpcs)
494                                 xpcs_config_eee(priv->hw->xpcs,
495                                                 priv->plat->mult_fact_100ns,
496                                                 false);
497                 }
498                 mutex_unlock(&priv->lock);
499                 return false;
500         }
501
502         if (priv->eee_active && !priv->eee_enabled) {
503                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
504                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
505                                      eee_tw_timer);
506                 if (priv->hw->xpcs)
507                         xpcs_config_eee(priv->hw->xpcs,
508                                         priv->plat->mult_fact_100ns,
509                                         true);
510         }
511
512         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
513                 del_timer_sync(&priv->eee_ctrl_timer);
514                 priv->tx_path_in_lpi_mode = false;
515                 stmmac_lpi_entry_timer_config(priv, 1);
516         } else {
517                 stmmac_lpi_entry_timer_config(priv, 0);
518                 mod_timer(&priv->eee_ctrl_timer,
519                           STMMAC_LPI_T(priv->tx_lpi_timer));
520         }
521
522         mutex_unlock(&priv->lock);
523         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
524         return true;
525 }
526
527 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
528  * @priv: driver private structure
529  * @p : descriptor pointer
530  * @skb : the socket buffer
531  * Description :
532  * This function will read the timestamp from the descriptor and pass it
533  * to the stack, also performing some sanity checks.
534  */
535 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
536                                    struct dma_desc *p, struct sk_buff *skb)
537 {
538         struct skb_shared_hwtstamps shhwtstamp;
539         bool found = false;
540         u64 ns = 0;
541
542         if (!priv->hwts_tx_en)
543                 return;
544
545         /* exit if skb doesn't support hw tstamp */
546         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
547                 return;
548
549         /* check tx tstamp status */
550         if (stmmac_get_tx_timestamp_status(priv, p)) {
551                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
552                 found = true;
553         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
554                 found = true;
555         }
556
557         if (found) {
558                 ns -= priv->plat->cdc_error_adj;
559
560                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
561                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
562
563                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
564                 /* pass tstamp to stack */
565                 skb_tstamp_tx(skb, &shhwtstamp);
566         }
567 }
568
569 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
570  * @priv: driver private structure
571  * @p : descriptor pointer
572  * @np : next descriptor pointer
573  * @skb : the socket buffer
574  * Description :
575  * This function will read received packet's timestamp from the descriptor
576  * and pass it to the stack. It also performs some sanity checks.
577  */
578 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
579                                    struct dma_desc *np, struct sk_buff *skb)
580 {
581         struct skb_shared_hwtstamps *shhwtstamp = NULL;
582         struct dma_desc *desc = p;
583         u64 ns = 0;
584
585         if (!priv->hwts_rx_en)
586                 return;
587         /* For GMAC4, the valid timestamp is from CTX next desc. */
588         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
589                 desc = np;
590
591         /* Check if timestamp is available */
592         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
593                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
594
595                 ns -= priv->plat->cdc_error_adj;
596
597                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
598                 shhwtstamp = skb_hwtstamps(skb);
599                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
600                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
601         } else  {
602                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
603         }
604 }
605
606 /**
607  *  stmmac_hwtstamp_set - control hardware timestamping.
608  *  @dev: device pointer.
609  *  @ifr: An IOCTL specific structure, that can contain a pointer to
610  *  a proprietary structure used to pass information to the driver.
611  *  Description:
612  *  This function configures the MAC to enable/disable both outgoing (TX)
613  *  and incoming (RX) packet timestamping based on user input.
614  *  Return Value:
615  *  0 on success and an appropriate -ve integer on failure.
616  */
617 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
618 {
619         struct stmmac_priv *priv = netdev_priv(dev);
620         struct hwtstamp_config config;
621         u32 ptp_v2 = 0;
622         u32 tstamp_all = 0;
623         u32 ptp_over_ipv4_udp = 0;
624         u32 ptp_over_ipv6_udp = 0;
625         u32 ptp_over_ethernet = 0;
626         u32 snap_type_sel = 0;
627         u32 ts_master_en = 0;
628         u32 ts_event_en = 0;
629
630         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
631                 netdev_alert(priv->dev, "No support for HW time stamping\n");
632                 priv->hwts_tx_en = 0;
633                 priv->hwts_rx_en = 0;
634
635                 return -EOPNOTSUPP;
636         }
637
638         if (copy_from_user(&config, ifr->ifr_data,
639                            sizeof(config)))
640                 return -EFAULT;
641
642         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
643                    __func__, config.flags, config.tx_type, config.rx_filter);
644
645         if (config.tx_type != HWTSTAMP_TX_OFF &&
646             config.tx_type != HWTSTAMP_TX_ON)
647                 return -ERANGE;
648
649         if (priv->adv_ts) {
650                 switch (config.rx_filter) {
651                 case HWTSTAMP_FILTER_NONE:
652                         /* time stamp no incoming packet at all */
653                         config.rx_filter = HWTSTAMP_FILTER_NONE;
654                         break;
655
656                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
657                         /* PTP v1, UDP, any kind of event packet */
658                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
659                         /* 'xmac' hardware can support Sync, Pdelay_Req and
660                          * Pdelay_resp by setting bit14 and bits17/16 to 01
661                          * This leaves Delay_Req timestamps out.
662                          * Enable all events *and* general purpose message
663                          * timestamping
664                          */
665                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
666                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668                         break;
669
670                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
671                         /* PTP v1, UDP, Sync packet */
672                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
673                         /* take time stamp for SYNC messages only */
674                         ts_event_en = PTP_TCR_TSEVNTENA;
675
676                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678                         break;
679
680                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
681                         /* PTP v1, UDP, Delay_req packet */
682                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
683                         /* take time stamp for Delay_Req messages only */
684                         ts_master_en = PTP_TCR_TSMSTRENA;
685                         ts_event_en = PTP_TCR_TSEVNTENA;
686
687                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
688                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
689                         break;
690
691                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
692                         /* PTP v2, UDP, any kind of event packet */
693                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
694                         ptp_v2 = PTP_TCR_TSVER2ENA;
695                         /* take time stamp for all event messages */
696                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
697
698                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
699                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
700                         break;
701
702                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
703                         /* PTP v2, UDP, Sync packet */
704                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
705                         ptp_v2 = PTP_TCR_TSVER2ENA;
706                         /* take time stamp for SYNC messages only */
707                         ts_event_en = PTP_TCR_TSEVNTENA;
708
709                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711                         break;
712
713                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
714                         /* PTP v2, UDP, Delay_req packet */
715                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
716                         ptp_v2 = PTP_TCR_TSVER2ENA;
717                         /* take time stamp for Delay_Req messages only */
718                         ts_master_en = PTP_TCR_TSMSTRENA;
719                         ts_event_en = PTP_TCR_TSEVNTENA;
720
721                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
722                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
723                         break;
724
725                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
726                         /* PTP v2/802.AS1 any layer, any kind of event packet */
727                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
728                         ptp_v2 = PTP_TCR_TSVER2ENA;
729                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
730                         if (priv->synopsys_id < DWMAC_CORE_4_10)
731                                 ts_event_en = PTP_TCR_TSEVNTENA;
732                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
733                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
734                         ptp_over_ethernet = PTP_TCR_TSIPENA;
735                         break;
736
737                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
738                         /* PTP v2/802.AS1, any layer, Sync packet */
739                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
740                         ptp_v2 = PTP_TCR_TSVER2ENA;
741                         /* take time stamp for SYNC messages only */
742                         ts_event_en = PTP_TCR_TSEVNTENA;
743
744                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
745                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
746                         ptp_over_ethernet = PTP_TCR_TSIPENA;
747                         break;
748
749                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
750                         /* PTP v2/802.AS1, any layer, Delay_req packet */
751                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
752                         ptp_v2 = PTP_TCR_TSVER2ENA;
753                         /* take time stamp for Delay_Req messages only */
754                         ts_master_en = PTP_TCR_TSMSTRENA;
755                         ts_event_en = PTP_TCR_TSEVNTENA;
756
757                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
758                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
759                         ptp_over_ethernet = PTP_TCR_TSIPENA;
760                         break;
761
762                 case HWTSTAMP_FILTER_NTP_ALL:
763                 case HWTSTAMP_FILTER_ALL:
764                         /* time stamp any incoming packet */
765                         config.rx_filter = HWTSTAMP_FILTER_ALL;
766                         tstamp_all = PTP_TCR_TSENALL;
767                         break;
768
769                 default:
770                         return -ERANGE;
771                 }
772         } else {
773                 switch (config.rx_filter) {
774                 case HWTSTAMP_FILTER_NONE:
775                         config.rx_filter = HWTSTAMP_FILTER_NONE;
776                         break;
777                 default:
778                         /* PTP v1, UDP, any kind of event packet */
779                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
780                         break;
781                 }
782         }
783         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
784         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
785
786         priv->systime_flags = STMMAC_HWTS_ACTIVE;
787
788         if (priv->hwts_tx_en || priv->hwts_rx_en) {
789                 priv->systime_flags |= tstamp_all | ptp_v2 |
790                                        ptp_over_ethernet | ptp_over_ipv6_udp |
791                                        ptp_over_ipv4_udp | ts_event_en |
792                                        ts_master_en | snap_type_sel;
793         }
794
795         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
796
797         memcpy(&priv->tstamp_config, &config, sizeof(config));
798
799         return copy_to_user(ifr->ifr_data, &config,
800                             sizeof(config)) ? -EFAULT : 0;
801 }
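/*
 * Editor's note: a minimal user-space sketch (not part of the driver) of the
 * standard SIOCSHWTSTAMP request that lands in stmmac_hwtstamp_set() above;
 * SIOCGHWTSTAMP reads the same config back via stmmac_hwtstamp_get(). The
 * interface name and the lack of error handling are illustrative assumptions.
 */
#if 0	/* illustrative only; build as a normal user-space program */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_timestamping(int sock_fd)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = { 0 };

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed ifname */
	ifr.ifr_data = (void *)&cfg;

	/* the driver may adjust cfg to what it actually programmed */
	return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
}
#endif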
802
803 /**
804  *  stmmac_hwtstamp_get - read hardware timestamping.
805  *  @dev: device pointer.
806  *  @ifr: An IOCTL specific structure, that can contain a pointer to
807  *  a proprietary structure used to pass information to the driver.
808  *  Description:
809  *  This function obtain the current hardware timestamping settings
810  *  as requested.
811  */
812 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
813 {
814         struct stmmac_priv *priv = netdev_priv(dev);
815         struct hwtstamp_config *config = &priv->tstamp_config;
816
817         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
818                 return -EOPNOTSUPP;
819
820         return copy_to_user(ifr->ifr_data, config,
821                             sizeof(*config)) ? -EFAULT : 0;
822 }
823
824 /**
825  * stmmac_init_tstamp_counter - init hardware timestamping counter
826  * @priv: driver private structure
827  * @systime_flags: timestamping flags
828  * Description:
829  * Initialize hardware counter for packet timestamping.
830  * This is valid as long as the interface is open and not suspended.
831  * It will be rerun after resuming from suspend, in which case the timestamping
832  * flags updated by stmmac_hwtstamp_set() also need to be restored.
833  */
834 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
835 {
836         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
837         struct timespec64 now;
838         u32 sec_inc = 0;
839         u64 temp = 0;
840
841         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
842                 return -EOPNOTSUPP;
843
844         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
845         priv->systime_flags = systime_flags;
846
847         /* program Sub Second Increment reg */
848         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
849                                            priv->plat->clk_ptp_rate,
850                                            xmac, &sec_inc);
851         temp = div_u64(1000000000ULL, sec_inc);
852
853         /* Store sub second increment for later use */
854         priv->sub_second_inc = sec_inc;
855
856         /* calculate the default addend value:
857          * the formula is:
858          * addend = (2^32)/freq_div_ratio;
859          * where, freq_div_ratio = 1e9ns/sec_inc
860          */
861         temp = (u64)(temp << 32);
862         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
863         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
864
865         /* initialize system time */
866         ktime_get_real_ts64(&now);
867
868         /* lower 32 bits of tv_sec are safe until y2106 */
869         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
870
871         return 0;
872 }
873 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
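/*
 * Editor's note: worked example of the addend computation above (illustrative,
 * assuming clk_ptp_rate = 50 MHz and that the fine-update path yields
 * sec_inc = 40 ns): temp = 1e9 / 40 = 25000000, so
 * default_addend = (25000000 << 32) / 50000000 = 2^31 = 0x80000000, i.e. the
 * accumulator overflows on every second ptp clock cycle.
 */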
874
875 /**
876  * stmmac_init_ptp - init PTP
877  * @priv: driver private structure
878  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
879  * This is done by looking at the HW cap. register.
880  * This function also registers the ptp driver.
881  */
882 static int stmmac_init_ptp(struct stmmac_priv *priv)
883 {
884         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
885         int ret;
886
887         if (priv->plat->ptp_clk_freq_config)
888                 priv->plat->ptp_clk_freq_config(priv);
889
890         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
891         if (ret)
892                 return ret;
893
894         priv->adv_ts = 0;
895         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
896         if (xmac && priv->dma_cap.atime_stamp)
897                 priv->adv_ts = 1;
898         /* Dwmac 3.x core with extend_desc can support adv_ts */
899         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
900                 priv->adv_ts = 1;
901
902         if (priv->dma_cap.time_stamp)
903                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
904
905         if (priv->adv_ts)
906                 netdev_info(priv->dev,
907                             "IEEE 1588-2008 Advanced Timestamp supported\n");
908
909         priv->hwts_tx_en = 0;
910         priv->hwts_rx_en = 0;
911
912         return 0;
913 }
914
915 static void stmmac_release_ptp(struct stmmac_priv *priv)
916 {
917         clk_disable_unprepare(priv->plat->clk_ptp_ref);
918         stmmac_ptp_unregister(priv);
919 }
920
921 /**
922  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
923  *  @priv: driver private structure
924  *  @duplex: duplex passed to the next function
925  *  Description: It is used for configuring the flow control in all queues
926  */
927 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
928 {
929         u32 tx_cnt = priv->plat->tx_queues_to_use;
930
931         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
932                         priv->pause, tx_cnt);
933 }
934
935 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
936                                                  phy_interface_t interface)
937 {
938         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
939
940         if (priv->hw->xpcs)
941                 return &priv->hw->xpcs->pcs;
942
943         if (priv->hw->lynx_pcs)
944                 return priv->hw->lynx_pcs;
945
946         return NULL;
947 }
948
949 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
950                               const struct phylink_link_state *state)
951 {
952         /* Nothing to do, xpcs_config() handles everything */
953 }
954
955 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
956 {
957         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
958         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
959         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
960         bool *hs_enable = &fpe_cfg->hs_enable;
961
962         if (is_up && *hs_enable) {
963                 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
964         } else {
965                 *lo_state = FPE_STATE_OFF;
966                 *lp_state = FPE_STATE_OFF;
967         }
968 }
969
970 static void stmmac_mac_link_down(struct phylink_config *config,
971                                  unsigned int mode, phy_interface_t interface)
972 {
973         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
974
975         stmmac_mac_set(priv, priv->ioaddr, false);
976         priv->eee_active = false;
977         priv->tx_lpi_enabled = false;
978         priv->eee_enabled = stmmac_eee_init(priv);
979         stmmac_set_eee_pls(priv, priv->hw, false);
980
981         if (priv->dma_cap.fpesel)
982                 stmmac_fpe_link_state_handle(priv, false);
983 }
984
985 static void stmmac_mac_link_up(struct phylink_config *config,
986                                struct phy_device *phy,
987                                unsigned int mode, phy_interface_t interface,
988                                int speed, int duplex,
989                                bool tx_pause, bool rx_pause)
990 {
991         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
992         u32 old_ctrl, ctrl;
993
994         if (priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup)
995                 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
996
997         old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
998         ctrl = old_ctrl & ~priv->hw->link.speed_mask;
999
1000         if (interface == PHY_INTERFACE_MODE_USXGMII) {
1001                 switch (speed) {
1002                 case SPEED_10000:
1003                         ctrl |= priv->hw->link.xgmii.speed10000;
1004                         break;
1005                 case SPEED_5000:
1006                         ctrl |= priv->hw->link.xgmii.speed5000;
1007                         break;
1008                 case SPEED_2500:
1009                         ctrl |= priv->hw->link.xgmii.speed2500;
1010                         break;
1011                 default:
1012                         return;
1013                 }
1014         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1015                 switch (speed) {
1016                 case SPEED_100000:
1017                         ctrl |= priv->hw->link.xlgmii.speed100000;
1018                         break;
1019                 case SPEED_50000:
1020                         ctrl |= priv->hw->link.xlgmii.speed50000;
1021                         break;
1022                 case SPEED_40000:
1023                         ctrl |= priv->hw->link.xlgmii.speed40000;
1024                         break;
1025                 case SPEED_25000:
1026                         ctrl |= priv->hw->link.xlgmii.speed25000;
1027                         break;
1028                 case SPEED_10000:
1029                         ctrl |= priv->hw->link.xgmii.speed10000;
1030                         break;
1031                 case SPEED_2500:
1032                         ctrl |= priv->hw->link.speed2500;
1033                         break;
1034                 case SPEED_1000:
1035                         ctrl |= priv->hw->link.speed1000;
1036                         break;
1037                 default:
1038                         return;
1039                 }
1040         } else {
1041                 switch (speed) {
1042                 case SPEED_2500:
1043                         ctrl |= priv->hw->link.speed2500;
1044                         break;
1045                 case SPEED_1000:
1046                         ctrl |= priv->hw->link.speed1000;
1047                         break;
1048                 case SPEED_100:
1049                         ctrl |= priv->hw->link.speed100;
1050                         break;
1051                 case SPEED_10:
1052                         ctrl |= priv->hw->link.speed10;
1053                         break;
1054                 default:
1055                         return;
1056                 }
1057         }
1058
1059         priv->speed = speed;
1060
1061         if (priv->plat->fix_mac_speed)
1062                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1063
1064         if (!duplex)
1065                 ctrl &= ~priv->hw->link.duplex;
1066         else
1067                 ctrl |= priv->hw->link.duplex;
1068
1069         /* Flow Control operation */
1070         if (rx_pause && tx_pause)
1071                 priv->flow_ctrl = FLOW_AUTO;
1072         else if (rx_pause && !tx_pause)
1073                 priv->flow_ctrl = FLOW_RX;
1074         else if (!rx_pause && tx_pause)
1075                 priv->flow_ctrl = FLOW_TX;
1076         else
1077                 priv->flow_ctrl = FLOW_OFF;
1078
1079         stmmac_mac_flow_ctrl(priv, duplex);
1080
1081         if (ctrl != old_ctrl)
1082                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1083
1084         stmmac_mac_set(priv, priv->ioaddr, true);
1085         if (phy && priv->dma_cap.eee) {
1086                 priv->eee_active =
1087                         phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
1088                 priv->eee_enabled = stmmac_eee_init(priv);
1089                 priv->tx_lpi_enabled = priv->eee_enabled;
1090                 stmmac_set_eee_pls(priv, priv->hw, true);
1091         }
1092
1093         if (priv->dma_cap.fpesel)
1094                 stmmac_fpe_link_state_handle(priv, true);
1095 }
1096
1097 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1098         .mac_select_pcs = stmmac_mac_select_pcs,
1099         .mac_config = stmmac_mac_config,
1100         .mac_link_down = stmmac_mac_link_down,
1101         .mac_link_up = stmmac_mac_link_up,
1102 };
1103
1104 /**
1105  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1106  * @priv: driver private structure
1107  * Description: this is to verify whether the HW supports the Physical
1108  * Coding Sublayer (PCS) interface, which can be used when the MAC is
1109  * configured for the TBI, RTBI, or SGMII PHY interface.
1110  */
1111 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1112 {
1113         int interface = priv->plat->interface;
1114
1115         if (priv->dma_cap.pcs) {
1116                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1117                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1118                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1119                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1120                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1121                         priv->hw->pcs = STMMAC_PCS_RGMII;
1122                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1123                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1124                         priv->hw->pcs = STMMAC_PCS_SGMII;
1125                 }
1126         }
1127 }
1128
1129 /**
1130  * stmmac_init_phy - PHY initialization
1131  * @dev: net device structure
1132  * Description: it initializes the driver's PHY state, and attaches the PHY
1133  * to the mac driver.
1134  *  Return value:
1135  *  0 on success
1136  */
1137 static int stmmac_init_phy(struct net_device *dev)
1138 {
1139         struct stmmac_priv *priv = netdev_priv(dev);
1140         struct fwnode_handle *phy_fwnode;
1141         struct fwnode_handle *fwnode;
1142         int ret;
1143
1144         if (!phylink_expects_phy(priv->phylink))
1145                 return 0;
1146
1147         fwnode = of_fwnode_handle(priv->plat->phylink_node);
1148         if (!fwnode)
1149                 fwnode = dev_fwnode(priv->device);
1150
1151         if (fwnode)
1152                 phy_fwnode = fwnode_get_phy_node(fwnode);
1153         else
1154                 phy_fwnode = NULL;
1155
1156         /* Some DT bindings do not set up the PHY handle. Let's try to
1157          * manually parse it
1158          */
1159         if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1160                 int addr = priv->plat->phy_addr;
1161                 struct phy_device *phydev;
1162
1163                 if (addr < 0) {
1164                         netdev_err(priv->dev, "no phy found\n");
1165                         return -ENODEV;
1166                 }
1167
1168                 phydev = mdiobus_get_phy(priv->mii, addr);
1169                 if (!phydev) {
1170                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1171                         return -ENODEV;
1172                 }
1173
1174                 ret = phylink_connect_phy(priv->phylink, phydev);
1175         } else {
1176                 fwnode_handle_put(phy_fwnode);
1177                 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1178         }
1179
1180         if (!priv->plat->pmt) {
1181                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1182
1183                 phylink_ethtool_get_wol(priv->phylink, &wol);
1184                 device_set_wakeup_capable(priv->device, !!wol.supported);
1185                 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1186         }
1187
1188         return ret;
1189 }
1190
1191 static int stmmac_phy_setup(struct stmmac_priv *priv)
1192 {
1193         struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1194         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1195         int max_speed = priv->plat->max_speed;
1196         int mode = priv->plat->phy_interface;
1197         struct phylink *phylink;
1198
1199         priv->phylink_config.dev = &priv->dev->dev;
1200         priv->phylink_config.type = PHYLINK_NETDEV;
1201         if (priv->plat->mdio_bus_data)
1202                 priv->phylink_config.ovr_an_inband =
1203                         mdio_bus_data->xpcs_an_inband;
1204
1205         if (!fwnode)
1206                 fwnode = dev_fwnode(priv->device);
1207
1208         /* Set the platform/firmware specified interface mode */
1209         __set_bit(mode, priv->phylink_config.supported_interfaces);
1210
1211         /* If we have an xpcs, it defines which PHY interfaces are supported. */
1212         if (priv->hw->xpcs)
1213                 xpcs_get_interfaces(priv->hw->xpcs,
1214                                     priv->phylink_config.supported_interfaces);
1215
1216         priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1217                 MAC_10 | MAC_100;
1218
1219         if (!max_speed || max_speed >= 1000)
1220                 priv->phylink_config.mac_capabilities |= MAC_1000;
1221
1222         if (priv->plat->has_gmac4) {
1223                 if (!max_speed || max_speed >= 2500)
1224                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1225         } else if (priv->plat->has_xgmac) {
1226                 if (!max_speed || max_speed >= 2500)
1227                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1228                 if (!max_speed || max_speed >= 5000)
1229                         priv->phylink_config.mac_capabilities |= MAC_5000FD;
1230                 if (!max_speed || max_speed >= 10000)
1231                         priv->phylink_config.mac_capabilities |= MAC_10000FD;
1232                 if (!max_speed || max_speed >= 25000)
1233                         priv->phylink_config.mac_capabilities |= MAC_25000FD;
1234                 if (!max_speed || max_speed >= 40000)
1235                         priv->phylink_config.mac_capabilities |= MAC_40000FD;
1236                 if (!max_speed || max_speed >= 50000)
1237                         priv->phylink_config.mac_capabilities |= MAC_50000FD;
1238                 if (!max_speed || max_speed >= 100000)
1239                         priv->phylink_config.mac_capabilities |= MAC_100000FD;
1240         }
1241
1242         /* Half-duplex can only work with a single queue */
1243         if (priv->plat->tx_queues_to_use > 1)
1244                 priv->phylink_config.mac_capabilities &=
1245                         ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1246         priv->phylink_config.mac_managed_pm = true;
1247
1248         phylink = phylink_create(&priv->phylink_config, fwnode,
1249                                  mode, &stmmac_phylink_mac_ops);
1250         if (IS_ERR(phylink))
1251                 return PTR_ERR(phylink);
1252
1253         priv->phylink = phylink;
1254         return 0;
1255 }
1256
1257 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1258                                     struct stmmac_dma_conf *dma_conf)
1259 {
1260         u32 rx_cnt = priv->plat->rx_queues_to_use;
1261         unsigned int desc_size;
1262         void *head_rx;
1263         u32 queue;
1264
1265         /* Display RX rings */
1266         for (queue = 0; queue < rx_cnt; queue++) {
1267                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1268
1269                 pr_info("\tRX Queue %u rings\n", queue);
1270
1271                 if (priv->extend_desc) {
1272                         head_rx = (void *)rx_q->dma_erx;
1273                         desc_size = sizeof(struct dma_extended_desc);
1274                 } else {
1275                         head_rx = (void *)rx_q->dma_rx;
1276                         desc_size = sizeof(struct dma_desc);
1277                 }
1278
1279                 /* Display RX ring */
1280                 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1281                                     rx_q->dma_rx_phy, desc_size);
1282         }
1283 }
1284
1285 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1286                                     struct stmmac_dma_conf *dma_conf)
1287 {
1288         u32 tx_cnt = priv->plat->tx_queues_to_use;
1289         unsigned int desc_size;
1290         void *head_tx;
1291         u32 queue;
1292
1293         /* Display TX rings */
1294         for (queue = 0; queue < tx_cnt; queue++) {
1295                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1296
1297                 pr_info("\tTX Queue %d rings\n", queue);
1298
1299                 if (priv->extend_desc) {
1300                         head_tx = (void *)tx_q->dma_etx;
1301                         desc_size = sizeof(struct dma_extended_desc);
1302                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1303                         head_tx = (void *)tx_q->dma_entx;
1304                         desc_size = sizeof(struct dma_edesc);
1305                 } else {
1306                         head_tx = (void *)tx_q->dma_tx;
1307                         desc_size = sizeof(struct dma_desc);
1308                 }
1309
1310                 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1311                                     tx_q->dma_tx_phy, desc_size);
1312         }
1313 }
1314
1315 static void stmmac_display_rings(struct stmmac_priv *priv,
1316                                  struct stmmac_dma_conf *dma_conf)
1317 {
1318         /* Display RX ring */
1319         stmmac_display_rx_rings(priv, dma_conf);
1320
1321         /* Display TX ring */
1322         stmmac_display_tx_rings(priv, dma_conf);
1323 }
1324
1325 static int stmmac_set_bfsize(int mtu, int bufsize)
1326 {
1327         int ret = bufsize;
1328
1329         if (mtu >= BUF_SIZE_8KiB)
1330                 ret = BUF_SIZE_16KiB;
1331         else if (mtu >= BUF_SIZE_4KiB)
1332                 ret = BUF_SIZE_8KiB;
1333         else if (mtu >= BUF_SIZE_2KiB)
1334                 ret = BUF_SIZE_4KiB;
1335         else if (mtu > DEFAULT_BUFSIZE)
1336                 ret = BUF_SIZE_2KiB;
1337         else
1338                 ret = DEFAULT_BUFSIZE;
1339
1340         return ret;
1341 }
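/*
 * Editor's note: worked example (illustrative). An MTU of 3000 is >=
 * BUF_SIZE_2KiB but < BUF_SIZE_4KiB, so stmmac_set_bfsize() returns
 * BUF_SIZE_4KiB; the standard 1500-byte MTU keeps DEFAULT_BUFSIZE (1536).
 */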
1342
1343 /**
1344  * stmmac_clear_rx_descriptors - clear RX descriptors
1345  * @priv: driver private structure
1346  * @dma_conf: structure to take the dma data
1347  * @queue: RX queue index
1348  * Description: this function is called to clear the RX descriptors,
1349  * whether basic or extended descriptors are used.
1350  */
1351 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1352                                         struct stmmac_dma_conf *dma_conf,
1353                                         u32 queue)
1354 {
1355         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1356         int i;
1357
1358         /* Clear the RX descriptors */
1359         for (i = 0; i < dma_conf->dma_rx_size; i++)
1360                 if (priv->extend_desc)
1361                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1362                                         priv->use_riwt, priv->mode,
1363                                         (i == dma_conf->dma_rx_size - 1),
1364                                         dma_conf->dma_buf_sz);
1365                 else
1366                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1367                                         priv->use_riwt, priv->mode,
1368                                         (i == dma_conf->dma_rx_size - 1),
1369                                         dma_conf->dma_buf_sz);
1370 }
1371
1372 /**
1373  * stmmac_clear_tx_descriptors - clear tx descriptors
1374  * @priv: driver private structure
1375  * @dma_conf: structure to take the dma data
1376  * @queue: TX queue index.
1377  * Description: this function is called to clear the TX descriptors,
1378  * whether basic or extended descriptors are in use.
1379  */
1380 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1381                                         struct stmmac_dma_conf *dma_conf,
1382                                         u32 queue)
1383 {
1384         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1385         int i;
1386
1387         /* Clear the TX descriptors */
1388         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1389                 int last = (i == (dma_conf->dma_tx_size - 1));
1390                 struct dma_desc *p;
1391
1392                 if (priv->extend_desc)
1393                         p = &tx_q->dma_etx[i].basic;
1394                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1395                         p = &tx_q->dma_entx[i].basic;
1396                 else
1397                         p = &tx_q->dma_tx[i];
1398
1399                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1400         }
1401 }
1402
1403 /**
1404  * stmmac_clear_descriptors - clear descriptors
1405  * @priv: driver private structure
1406  * @dma_conf: structure to take the dma data
1407  * Description: this function is called to clear the TX and RX descriptors,
1408  * whether basic or extended descriptors are in use.
1409  */
1410 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1411                                      struct stmmac_dma_conf *dma_conf)
1412 {
1413         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1414         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1415         u32 queue;
1416
1417         /* Clear the RX descriptors */
1418         for (queue = 0; queue < rx_queue_cnt; queue++)
1419                 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1420
1421         /* Clear the TX descriptors */
1422         for (queue = 0; queue < tx_queue_cnt; queue++)
1423                 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1424 }
1425
1426 /**
1427  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1428  * @priv: driver private structure
1429  * @dma_conf: structure to take the dma data
1430  * @p: descriptor pointer
1431  * @i: descriptor index
1432  * @flags: gfp flag
1433  * @queue: RX queue index
1434  * Description: this function is called to allocate a receive buffer, perform
1435  * the DMA mapping and init the descriptor.
1436  */
1437 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1438                                   struct stmmac_dma_conf *dma_conf,
1439                                   struct dma_desc *p,
1440                                   int i, gfp_t flags, u32 queue)
1441 {
1442         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1443         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1444         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1445
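             /* Hosts whose DMA engine can address at most 32 bits must take
              * their RX pages from the 32-bit DMA zone.
              */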
1446         if (priv->dma_cap.host_dma_width <= 32)
1447                 gfp |= GFP_DMA32;
1448
1449         if (!buf->page) {
1450                 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1451                 if (!buf->page)
1452                         return -ENOMEM;
1453                 buf->page_offset = stmmac_rx_offset(priv);
1454         }
1455
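             /* With Split Header (SPH) enabled, the descriptor also gets a
              * secondary buffer so header and payload can be placed in
              * separate pages.
              */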
1456         if (priv->sph && !buf->sec_page) {
1457                 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1458                 if (!buf->sec_page)
1459                         return -ENOMEM;
1460
1461                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1462                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1463         } else {
1464                 buf->sec_page = NULL;
1465                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1466         }
1467
1468         buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1469
1470         stmmac_set_desc_addr(priv, p, buf->addr);
1471         if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1472                 stmmac_init_desc3(priv, p);
1473
1474         return 0;
1475 }
1476
1477 /**
1478  * stmmac_free_rx_buffer - free RX dma buffers
1479  * @priv: private structure
1480  * @rx_q: RX queue
1481  * @i: buffer index.
1482  */
1483 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1484                                   struct stmmac_rx_queue *rx_q,
1485                                   int i)
1486 {
1487         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1488
1489         if (buf->page)
1490                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1491         buf->page = NULL;
1492
1493         if (buf->sec_page)
1494                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1495         buf->sec_page = NULL;
1496 }
1497
1498 /**
1499  * stmmac_free_tx_buffer - free a TX dma buffer
1500  * @priv: private structure
1501  * @dma_conf: structure to take the dma data
1502  * @queue: TX queue index
1503  * @i: buffer index.
1504  */
1505 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1506                                   struct stmmac_dma_conf *dma_conf,
1507                                   u32 queue, int i)
1508 {
1509         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1510
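             /* Skip unmapping for XDP_TX buffers; their DMA mapping is owned
              * by the RX page pool.
              */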
1511         if (tx_q->tx_skbuff_dma[i].buf &&
1512             tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1513                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1514                         dma_unmap_page(priv->device,
1515                                        tx_q->tx_skbuff_dma[i].buf,
1516                                        tx_q->tx_skbuff_dma[i].len,
1517                                        DMA_TO_DEVICE);
1518                 else
1519                         dma_unmap_single(priv->device,
1520                                          tx_q->tx_skbuff_dma[i].buf,
1521                                          tx_q->tx_skbuff_dma[i].len,
1522                                          DMA_TO_DEVICE);
1523         }
1524
1525         if (tx_q->xdpf[i] &&
1526             (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1527              tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1528                 xdp_return_frame(tx_q->xdpf[i]);
1529                 tx_q->xdpf[i] = NULL;
1530         }
1531
1532         if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1533                 tx_q->xsk_frames_done++;
1534
1535         if (tx_q->tx_skbuff[i] &&
1536             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1537                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1538                 tx_q->tx_skbuff[i] = NULL;
1539         }
1540
1541         tx_q->tx_skbuff_dma[i].buf = 0;
1542         tx_q->tx_skbuff_dma[i].map_as_page = false;
1543 }
1544
1545 /**
1546  * dma_free_rx_skbufs - free RX dma buffers
1547  * @priv: private structure
1548  * @dma_conf: structure to take the dma data
1549  * @queue: RX queue index
1550  */
1551 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1552                                struct stmmac_dma_conf *dma_conf,
1553                                u32 queue)
1554 {
1555         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1556         int i;
1557
1558         for (i = 0; i < dma_conf->dma_rx_size; i++)
1559                 stmmac_free_rx_buffer(priv, rx_q, i);
1560 }
1561
1562 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1563                                    struct stmmac_dma_conf *dma_conf,
1564                                    u32 queue, gfp_t flags)
1565 {
1566         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1567         int i;
1568
1569         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1570                 struct dma_desc *p;
1571                 int ret;
1572
1573                 if (priv->extend_desc)
1574                         p = &((rx_q->dma_erx + i)->basic);
1575                 else
1576                         p = rx_q->dma_rx + i;
1577
1578                 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1579                                              queue);
1580                 if (ret)
1581                         return ret;
1582
1583                 rx_q->buf_alloc_num++;
1584         }
1585
1586         return 0;
1587 }
1588
1589 /**
1590  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1591  * @priv: private structure
1592  * @dma_conf: structure to take the dma data
1593  * @queue: RX queue index
1594  */
1595 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1596                                 struct stmmac_dma_conf *dma_conf,
1597                                 u32 queue)
1598 {
1599         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1600         int i;
1601
1602         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1603                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1604
1605                 if (!buf->xdp)
1606                         continue;
1607
1608                 xsk_buff_free(buf->xdp);
1609                 buf->xdp = NULL;
1610         }
1611 }
1612
1613 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1614                                       struct stmmac_dma_conf *dma_conf,
1615                                       u32 queue)
1616 {
1617         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1618         int i;
1619
1620         /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1621          * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1622          * use this macro to make sure there are no size violations.
1623          */
1624         XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1625
1626         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1627                 struct stmmac_rx_buffer *buf;
1628                 dma_addr_t dma_addr;
1629                 struct dma_desc *p;
1630
1631                 if (priv->extend_desc)
1632                         p = (struct dma_desc *)(rx_q->dma_erx + i);
1633                 else
1634                         p = rx_q->dma_rx + i;
1635
1636                 buf = &rx_q->buf_pool[i];
1637
1638                 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1639                 if (!buf->xdp)
1640                         return -ENOMEM;
1641
1642                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1643                 stmmac_set_desc_addr(priv, p, dma_addr);
1644                 rx_q->buf_alloc_num++;
1645         }
1646
1647         return 0;
1648 }
1649
1650 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1651 {
1652         if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1653                 return NULL;
1654
1655         return xsk_get_pool_from_qid(priv->dev, queue);
1656 }
1657
1658 /**
1659  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1660  * @priv: driver private structure
1661  * @dma_conf: structure to take the dma data
1662  * @queue: RX queue index
1663  * @flags: gfp flag.
1664  * Description: this function initializes the DMA RX descriptors
1665  * and allocates the socket buffers. It supports the chained and ring
1666  * modes.
1667  */
1668 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1669                                     struct stmmac_dma_conf *dma_conf,
1670                                     u32 queue, gfp_t flags)
1671 {
1672         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1673         int ret;
1674
1675         netif_dbg(priv, probe, priv->dev,
1676                   "(%s) dma_rx_phy=0x%08x\n", __func__,
1677                   (u32)rx_q->dma_rx_phy);
1678
1679         stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1680
1681         xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1682
1683         rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1684
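             /* Re-register the RX memory model to match whether an AF_XDP
              * zero-copy pool is currently bound to this queue.
              */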
1685         if (rx_q->xsk_pool) {
1686                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1687                                                    MEM_TYPE_XSK_BUFF_POOL,
1688                                                    NULL));
1689                 netdev_info(priv->dev,
1690                             "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1691                             rx_q->queue_index);
1692                 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1693         } else {
1694                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695                                                    MEM_TYPE_PAGE_POOL,
1696                                                    rx_q->page_pool));
1697                 netdev_info(priv->dev,
1698                             "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1699                             rx_q->queue_index);
1700         }
1701
1702         if (rx_q->xsk_pool) {
1703                 /* RX XDP ZC buffer pool may not be populated, e.g.
1704                  * xdpsock TX-only.
1705                  */
1706                 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1707         } else {
1708                 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1709                 if (ret < 0)
1710                         return -ENOMEM;
1711         }
1712
1713         /* Setup the chained descriptor addresses */
1714         if (priv->mode == STMMAC_CHAIN_MODE) {
1715                 if (priv->extend_desc)
1716                         stmmac_mode_init(priv, rx_q->dma_erx,
1717                                          rx_q->dma_rx_phy,
1718                                          dma_conf->dma_rx_size, 1);
1719                 else
1720                         stmmac_mode_init(priv, rx_q->dma_rx,
1721                                          rx_q->dma_rx_phy,
1722                                          dma_conf->dma_rx_size, 0);
1723         }
1724
1725         return 0;
1726 }
1727
1728 static int init_dma_rx_desc_rings(struct net_device *dev,
1729                                   struct stmmac_dma_conf *dma_conf,
1730                                   gfp_t flags)
1731 {
1732         struct stmmac_priv *priv = netdev_priv(dev);
1733         u32 rx_count = priv->plat->rx_queues_to_use;
1734         int queue;
1735         int ret;
1736
1737         /* RX INITIALIZATION */
1738         netif_dbg(priv, probe, priv->dev,
1739                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1740
1741         for (queue = 0; queue < rx_count; queue++) {
1742                 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1743                 if (ret)
1744                         goto err_init_rx_buffers;
1745         }
1746
1747         return 0;
1748
1749 err_init_rx_buffers:
1750         while (queue >= 0) {
1751                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1752
1753                 if (rx_q->xsk_pool)
1754                         dma_free_rx_xskbufs(priv, dma_conf, queue);
1755                 else
1756                         dma_free_rx_skbufs(priv, dma_conf, queue);
1757
1758                 rx_q->buf_alloc_num = 0;
1759                 rx_q->xsk_pool = NULL;
1760
1761                 queue--;
1762         }
1763
1764         return ret;
1765 }
1766
1767 /**
1768  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1769  * @priv: driver private structure
1770  * @dma_conf: structure to take the dma data
1771  * @queue: TX queue index
1772  * Description: this function initializes the DMA TX descriptors
1773  * and the per-descriptor TX bookkeeping. It supports the chained and ring
1774  * modes.
1775  */
1776 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1777                                     struct stmmac_dma_conf *dma_conf,
1778                                     u32 queue)
1779 {
1780         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1781         int i;
1782
1783         netif_dbg(priv, probe, priv->dev,
1784                   "(%s) dma_tx_phy=0x%08x\n", __func__,
1785                   (u32)tx_q->dma_tx_phy);
1786
1787         /* Setup the chained descriptor addresses */
1788         if (priv->mode == STMMAC_CHAIN_MODE) {
1789                 if (priv->extend_desc)
1790                         stmmac_mode_init(priv, tx_q->dma_etx,
1791                                          tx_q->dma_tx_phy,
1792                                          dma_conf->dma_tx_size, 1);
1793                 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1794                         stmmac_mode_init(priv, tx_q->dma_tx,
1795                                          tx_q->dma_tx_phy,
1796                                          dma_conf->dma_tx_size, 0);
1797         }
1798
1799         tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1800
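             /* Clear every TX descriptor and reset the per-entry DMA/skb
              * bookkeeping to a known-empty state.
              */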
1801         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1802                 struct dma_desc *p;
1803
1804                 if (priv->extend_desc)
1805                         p = &((tx_q->dma_etx + i)->basic);
1806                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1807                         p = &((tx_q->dma_entx + i)->basic);
1808                 else
1809                         p = tx_q->dma_tx + i;
1810
1811                 stmmac_clear_desc(priv, p);
1812
1813                 tx_q->tx_skbuff_dma[i].buf = 0;
1814                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1815                 tx_q->tx_skbuff_dma[i].len = 0;
1816                 tx_q->tx_skbuff_dma[i].last_segment = false;
1817                 tx_q->tx_skbuff[i] = NULL;
1818         }
1819
1820         return 0;
1821 }
1822
1823 static int init_dma_tx_desc_rings(struct net_device *dev,
1824                                   struct stmmac_dma_conf *dma_conf)
1825 {
1826         struct stmmac_priv *priv = netdev_priv(dev);
1827         u32 tx_queue_cnt;
1828         u32 queue;
1829
1830         tx_queue_cnt = priv->plat->tx_queues_to_use;
1831
1832         for (queue = 0; queue < tx_queue_cnt; queue++)
1833                 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1834
1835         return 0;
1836 }
1837
1838 /**
1839  * init_dma_desc_rings - init the RX/TX descriptor rings
1840  * @dev: net device structure
1841  * @dma_conf: structure to take the dma data
1842  * @flags: gfp flag.
1843  * Description: this function initializes the DMA RX/TX descriptors
1844  * and allocates the socket buffers. It supports the chained and ring
1845  * modes.
1846  */
1847 static int init_dma_desc_rings(struct net_device *dev,
1848                                struct stmmac_dma_conf *dma_conf,
1849                                gfp_t flags)
1850 {
1851         struct stmmac_priv *priv = netdev_priv(dev);
1852         int ret;
1853
1854         ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1855         if (ret)
1856                 return ret;
1857
1858         ret = init_dma_tx_desc_rings(dev, dma_conf);
1859
1860         stmmac_clear_descriptors(priv, dma_conf);
1861
1862         if (netif_msg_hw(priv))
1863                 stmmac_display_rings(priv, dma_conf);
1864
1865         return ret;
1866 }
1867
1868 /**
1869  * dma_free_tx_skbufs - free TX dma buffers
1870  * @priv: private structure
1871  * @dma_conf: structure to take the dma data
1872  * @queue: TX queue index
1873  */
1874 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1875                                struct stmmac_dma_conf *dma_conf,
1876                                u32 queue)
1877 {
1878         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1879         int i;
1880
1881         tx_q->xsk_frames_done = 0;
1882
1883         for (i = 0; i < dma_conf->dma_tx_size; i++)
1884                 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1885
1886         if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1887                 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1888                 tx_q->xsk_frames_done = 0;
1889                 tx_q->xsk_pool = NULL;
1890         }
1891 }
1892
1893 /**
1894  * stmmac_free_tx_skbufs - free TX skb buffers
1895  * @priv: private structure
1896  */
1897 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1898 {
1899         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1900         u32 queue;
1901
1902         for (queue = 0; queue < tx_queue_cnt; queue++)
1903                 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1904 }
1905
1906 /**
1907  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1908  * @priv: private structure
1909  * @dma_conf: structure to take the dma data
1910  * @queue: RX queue index
1911  */
1912 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1913                                          struct stmmac_dma_conf *dma_conf,
1914                                          u32 queue)
1915 {
1916         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1917
1918         /* Release the DMA RX socket buffers */
1919         if (rx_q->xsk_pool)
1920                 dma_free_rx_xskbufs(priv, dma_conf, queue);
1921         else
1922                 dma_free_rx_skbufs(priv, dma_conf, queue);
1923
1924         rx_q->buf_alloc_num = 0;
1925         rx_q->xsk_pool = NULL;
1926
1927         /* Free DMA regions of consistent memory previously allocated */
1928         if (!priv->extend_desc)
1929                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1930                                   sizeof(struct dma_desc),
1931                                   rx_q->dma_rx, rx_q->dma_rx_phy);
1932         else
1933                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1934                                   sizeof(struct dma_extended_desc),
1935                                   rx_q->dma_erx, rx_q->dma_rx_phy);
1936
1937         if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1938                 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1939
1940         kfree(rx_q->buf_pool);
1941         if (rx_q->page_pool)
1942                 page_pool_destroy(rx_q->page_pool);
1943 }
1944
1945 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1946                                        struct stmmac_dma_conf *dma_conf)
1947 {
1948         u32 rx_count = priv->plat->rx_queues_to_use;
1949         u32 queue;
1950
1951         /* Free RX queue resources */
1952         for (queue = 0; queue < rx_count; queue++)
1953                 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1954 }
1955
1956 /**
1957  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1958  * @priv: private structure
1959  * @dma_conf: structure to take the dma data
1960  * @queue: TX queue index
1961  */
1962 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1963                                          struct stmmac_dma_conf *dma_conf,
1964                                          u32 queue)
1965 {
1966         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1967         size_t size;
1968         void *addr;
1969
1970         /* Release the DMA TX socket buffers */
1971         dma_free_tx_skbufs(priv, dma_conf, queue);
1972
1973         if (priv->extend_desc) {
1974                 size = sizeof(struct dma_extended_desc);
1975                 addr = tx_q->dma_etx;
1976         } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1977                 size = sizeof(struct dma_edesc);
1978                 addr = tx_q->dma_entx;
1979         } else {
1980                 size = sizeof(struct dma_desc);
1981                 addr = tx_q->dma_tx;
1982         }
1983
1984         size *= dma_conf->dma_tx_size;
1985
1986         dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1987
1988         kfree(tx_q->tx_skbuff_dma);
1989         kfree(tx_q->tx_skbuff);
1990 }
1991
1992 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1993                                        struct stmmac_dma_conf *dma_conf)
1994 {
1995         u32 tx_count = priv->plat->tx_queues_to_use;
1996         u32 queue;
1997
1998         /* Free TX queue resources */
1999         for (queue = 0; queue < tx_count; queue++)
2000                 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2001 }
2002
2003 /**
2004  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2005  * @priv: private structure
2006  * @dma_conf: structure to take the dma data
2007  * @queue: RX queue index
2008  * Description: according to which descriptor can be used (extended or basic)
2009  * this function allocates the resources for the RX path. For example, it
2010  * pre-allocates the RX socket buffers in order to allow the zero-copy
2011  * mechanism.
2012  */
2013 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2014                                          struct stmmac_dma_conf *dma_conf,
2015                                          u32 queue)
2016 {
2017         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2018         struct stmmac_channel *ch = &priv->channel[queue];
2019         bool xdp_prog = stmmac_xdp_is_enabled(priv);
2020         struct page_pool_params pp_params = { 0 };
2021         unsigned int num_pages;
2022         unsigned int napi_id;
2023         int ret;
2024
2025         rx_q->queue_index = queue;
2026         rx_q->priv_data = priv;
2027
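             /* One page_pool per RX queue: pool_size matches the ring length
              * and the page order is derived from dma_buf_sz.
              */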
2028         pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2029         pp_params.pool_size = dma_conf->dma_rx_size;
2030         num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2031         pp_params.order = ilog2(num_pages);
2032         pp_params.nid = dev_to_node(priv->device);
2033         pp_params.dev = priv->device;
2034         pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2035         pp_params.offset = stmmac_rx_offset(priv);
2036         pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2037
2038         rx_q->page_pool = page_pool_create(&pp_params);
2039         if (IS_ERR(rx_q->page_pool)) {
2040                 ret = PTR_ERR(rx_q->page_pool);
2041                 rx_q->page_pool = NULL;
2042                 return ret;
2043         }
2044
2045         rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2046                                  sizeof(*rx_q->buf_pool),
2047                                  GFP_KERNEL);
2048         if (!rx_q->buf_pool)
2049                 return -ENOMEM;
2050
2051         if (priv->extend_desc) {
2052                 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2053                                                    dma_conf->dma_rx_size *
2054                                                    sizeof(struct dma_extended_desc),
2055                                                    &rx_q->dma_rx_phy,
2056                                                    GFP_KERNEL);
2057                 if (!rx_q->dma_erx)
2058                         return -ENOMEM;
2059
2060         } else {
2061                 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2062                                                   dma_conf->dma_rx_size *
2063                                                   sizeof(struct dma_desc),
2064                                                   &rx_q->dma_rx_phy,
2065                                                   GFP_KERNEL);
2066                 if (!rx_q->dma_rx)
2067                         return -ENOMEM;
2068         }
2069
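             /* Register the XDP RxQ against the NAPI instance that will
              * actually service this queue: rxtx_napi for AF_XDP zero-copy,
              * rx_napi otherwise.
              */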
2070         if (stmmac_xdp_is_enabled(priv) &&
2071             test_bit(queue, priv->af_xdp_zc_qps))
2072                 napi_id = ch->rxtx_napi.napi_id;
2073         else
2074                 napi_id = ch->rx_napi.napi_id;
2075
2076         ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2077                                rx_q->queue_index,
2078                                napi_id);
2079         if (ret) {
2080                 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2081                 return -EINVAL;
2082         }
2083
2084         return 0;
2085 }
2086
2087 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2088                                        struct stmmac_dma_conf *dma_conf)
2089 {
2090         u32 rx_count = priv->plat->rx_queues_to_use;
2091         u32 queue;
2092         int ret;
2093
2094         /* RX queues buffers and DMA */
2095         for (queue = 0; queue < rx_count; queue++) {
2096                 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2097                 if (ret)
2098                         goto err_dma;
2099         }
2100
2101         return 0;
2102
2103 err_dma:
2104         free_dma_rx_desc_resources(priv, dma_conf);
2105
2106         return ret;
2107 }
2108
2109 /**
2110  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2111  * @priv: private structure
2112  * @dma_conf: structure to take the dma data
2113  * @queue: TX queue index
2114  * Description: according to which descriptor can be used (extended or basic)
2115  * this function allocates the resources for the TX path: the descriptor
2116  * ring in coherent memory plus the tx_skbuff/tx_skbuff_dma bookkeeping
2117  * arrays.
2118  */
2119 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2120                                          struct stmmac_dma_conf *dma_conf,
2121                                          u32 queue)
2122 {
2123         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2124         size_t size;
2125         void *addr;
2126
2127         tx_q->queue_index = queue;
2128         tx_q->priv_data = priv;
2129
2130         tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2131                                       sizeof(*tx_q->tx_skbuff_dma),
2132                                       GFP_KERNEL);
2133         if (!tx_q->tx_skbuff_dma)
2134                 return -ENOMEM;
2135
2136         tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2137                                   sizeof(struct sk_buff *),
2138                                   GFP_KERNEL);
2139         if (!tx_q->tx_skbuff)
2140                 return -ENOMEM;
2141
2142         if (priv->extend_desc)
2143                 size = sizeof(struct dma_extended_desc);
2144         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2145                 size = sizeof(struct dma_edesc);
2146         else
2147                 size = sizeof(struct dma_desc);
2148
2149         size *= dma_conf->dma_tx_size;
2150
2151         addr = dma_alloc_coherent(priv->device, size,
2152                                   &tx_q->dma_tx_phy, GFP_KERNEL);
2153         if (!addr)
2154                 return -ENOMEM;
2155
2156         if (priv->extend_desc)
2157                 tx_q->dma_etx = addr;
2158         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2159                 tx_q->dma_entx = addr;
2160         else
2161                 tx_q->dma_tx = addr;
2162
2163         return 0;
2164 }
2165
2166 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2167                                        struct stmmac_dma_conf *dma_conf)
2168 {
2169         u32 tx_count = priv->plat->tx_queues_to_use;
2170         u32 queue;
2171         int ret;
2172
2173         /* TX queues buffers and DMA */
2174         for (queue = 0; queue < tx_count; queue++) {
2175                 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2176                 if (ret)
2177                         goto err_dma;
2178         }
2179
2180         return 0;
2181
2182 err_dma:
2183         free_dma_tx_desc_resources(priv, dma_conf);
2184         return ret;
2185 }
2186
2187 /**
2188  * alloc_dma_desc_resources - alloc TX/RX resources.
2189  * @priv: private structure
2190  * @dma_conf: structure to take the dma data
2191  * Description: according to which descriptor can be used (extended or basic)
2192  * this function allocates the resources for both the TX and RX paths. In
2193  * case of reception, for example, it pre-allocates the RX socket buffers
2194  * in order to allow the zero-copy mechanism.
2195  */
2196 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2197                                     struct stmmac_dma_conf *dma_conf)
2198 {
2199         /* RX Allocation */
2200         int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2201
2202         if (ret)
2203                 return ret;
2204
2205         ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2206
2207         return ret;
2208 }
2209
2210 /**
2211  * free_dma_desc_resources - free dma desc resources
2212  * @priv: private structure
2213  * @dma_conf: structure to take the dma data
2214  */
2215 static void free_dma_desc_resources(struct stmmac_priv *priv,
2216                                     struct stmmac_dma_conf *dma_conf)
2217 {
2218         /* Release the DMA TX socket buffers */
2219         free_dma_tx_desc_resources(priv, dma_conf);
2220
2221         /* Release the DMA RX socket buffers later
2222          * to ensure all pending XDP_TX buffers are returned.
2223          */
2224         free_dma_rx_desc_resources(priv, dma_conf);
2225 }
2226
2227 /**
2228  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2229  *  @priv: driver private structure
2230  *  Description: It is used for enabling the rx queues in the MAC
2231  */
2232 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2233 {
2234         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2235         int queue;
2236         u8 mode;
2237
2238         for (queue = 0; queue < rx_queues_count; queue++) {
2239                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2240                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2241         }
2242 }
2243
2244 /**
2245  * stmmac_start_rx_dma - start RX DMA channel
2246  * @priv: driver private structure
2247  * @chan: RX channel index
2248  * Description:
2249  * This starts an RX DMA channel
2250  */
2251 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2252 {
2253         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2254         stmmac_start_rx(priv, priv->ioaddr, chan);
2255 }
2256
2257 /**
2258  * stmmac_start_tx_dma - start TX DMA channel
2259  * @priv: driver private structure
2260  * @chan: TX channel index
2261  * Description:
2262  * This starts a TX DMA channel
2263  */
2264 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2265 {
2266         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2267         stmmac_start_tx(priv, priv->ioaddr, chan);
2268 }
2269
2270 /**
2271  * stmmac_stop_rx_dma - stop RX DMA channel
2272  * @priv: driver private structure
2273  * @chan: RX channel index
2274  * Description:
2275  * This stops an RX DMA channel
2276  */
2277 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2278 {
2279         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2280         stmmac_stop_rx(priv, priv->ioaddr, chan);
2281 }
2282
2283 /**
2284  * stmmac_stop_tx_dma - stop TX DMA channel
2285  * @priv: driver private structure
2286  * @chan: TX channel index
2287  * Description:
2288  * This stops a TX DMA channel
2289  */
2290 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2291 {
2292         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2293         stmmac_stop_tx(priv, priv->ioaddr, chan);
2294 }
2295
2296 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2297 {
2298         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2299         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2300         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2301         u32 chan;
2302
2303         for (chan = 0; chan < dma_csr_ch; chan++) {
2304                 struct stmmac_channel *ch = &priv->channel[chan];
2305                 unsigned long flags;
2306
2307                 spin_lock_irqsave(&ch->lock, flags);
2308                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2309                 spin_unlock_irqrestore(&ch->lock, flags);
2310         }
2311 }
2312
2313 /**
2314  * stmmac_start_all_dma - start all RX and TX DMA channels
2315  * @priv: driver private structure
2316  * Description:
2317  * This starts all the RX and TX DMA channels
2318  */
2319 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2320 {
2321         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2322         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2323         u32 chan = 0;
2324
2325         for (chan = 0; chan < rx_channels_count; chan++)
2326                 stmmac_start_rx_dma(priv, chan);
2327
2328         for (chan = 0; chan < tx_channels_count; chan++)
2329                 stmmac_start_tx_dma(priv, chan);
2330 }
2331
2332 /**
2333  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2334  * @priv: driver private structure
2335  * Description:
2336  * This stops the RX and TX DMA channels
2337  */
2338 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2339 {
2340         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2341         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2342         u32 chan = 0;
2343
2344         for (chan = 0; chan < rx_channels_count; chan++)
2345                 stmmac_stop_rx_dma(priv, chan);
2346
2347         for (chan = 0; chan < tx_channels_count; chan++)
2348                 stmmac_stop_tx_dma(priv, chan);
2349 }
2350
2351 /**
2352  *  stmmac_dma_operation_mode - HW DMA operation mode
2353  *  @priv: driver private structure
2354  *  Description: it is used for configuring the DMA operation mode register in
2355  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2356  */
2357 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2358 {
2359         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2360         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2361         int rxfifosz = priv->plat->rx_fifo_size;
2362         int txfifosz = priv->plat->tx_fifo_size;
2363         u32 txmode = 0;
2364         u32 rxmode = 0;
2365         u32 chan = 0;
2366         u8 qmode = 0;
2367
2368         if (rxfifosz == 0)
2369                 rxfifosz = priv->dma_cap.rx_fifo_size;
2370         if (txfifosz == 0)
2371                 txfifosz = priv->dma_cap.tx_fifo_size;
2372
2373         /* Adjust for real per queue fifo size */
2374         rxfifosz /= rx_channels_count;
2375         txfifosz /= tx_channels_count;
2376
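             /* Choose between threshold and Store-And-Forward (SF) operation:
              * SF is needed when the TX checksum is computed in hardware,
              * while the platform may force pure threshold mode instead.
              */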
2377         if (priv->plat->force_thresh_dma_mode) {
2378                 txmode = tc;
2379                 rxmode = tc;
2380         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2381                 /*
2382                  * In case of GMAC, SF mode can be enabled
2383                  * to perform the TX COE in HW. This depends on:
2384                  * 1) TX COE being actually supported
2385                  * 2) there being no buggy Jumbo frame support
2386                  *    that requires not inserting the csum in the TDES.
2387                  */
2388                 txmode = SF_DMA_MODE;
2389                 rxmode = SF_DMA_MODE;
2390                 priv->xstats.threshold = SF_DMA_MODE;
2391         } else {
2392                 txmode = tc;
2393                 rxmode = SF_DMA_MODE;
2394         }
2395
2396         /* configure all channels */
2397         for (chan = 0; chan < rx_channels_count; chan++) {
2398                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2399                 u32 buf_size;
2400
2401                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2402
2403                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2404                                 rxfifosz, qmode);
2405
2406                 if (rx_q->xsk_pool) {
2407                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2408                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2409                                               buf_size,
2410                                               chan);
2411                 } else {
2412                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2413                                               priv->dma_conf.dma_buf_sz,
2414                                               chan);
2415                 }
2416         }
2417
2418         for (chan = 0; chan < tx_channels_count; chan++) {
2419                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2420
2421                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2422                                 txfifosz, qmode);
2423         }
2424 }
2425
2426 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2427 {
2428         struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2429         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2430         struct xsk_buff_pool *pool = tx_q->xsk_pool;
2431         unsigned int entry = tx_q->cur_tx;
2432         struct dma_desc *tx_desc = NULL;
2433         struct xdp_desc xdp_desc;
2434         bool work_done = true;
2435
2436         /* Avoids TX time-out as we are sharing with slow path */
2437         txq_trans_cond_update(nq);
2438
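             /* Never submit more XSK frames than there are free descriptors,
              * since the ring is shared with the regular transmit path.
              */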
2439         budget = min(budget, stmmac_tx_avail(priv, queue));
2440
2441         while (budget-- > 0) {
2442                 dma_addr_t dma_addr;
2443                 bool set_ic;
2444
2445                 /* We are sharing the ring with the slow path, so stop XSK TX desc
2446                  * submission when the available TX ring space drops below the threshold.
2447                  */
2448                 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2449                     !netif_carrier_ok(priv->dev)) {
2450                         work_done = false;
2451                         break;
2452                 }
2453
2454                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2455                         break;
2456
2457                 if (likely(priv->extend_desc))
2458                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2459                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2460                         tx_desc = &tx_q->dma_entx[entry].basic;
2461                 else
2462                         tx_desc = tx_q->dma_tx + entry;
2463
2464                 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2465                 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2466
2467                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2468
2469                 /* To return the XDP buffer to the XSK pool, we simply call
2470                  * xsk_tx_completed(), so we don't need to fill up
2471                  * 'buf' and 'xdpf'.
2472                  */
2473                 tx_q->tx_skbuff_dma[entry].buf = 0;
2474                 tx_q->xdpf[entry] = NULL;
2475
2476                 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2477                 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2478                 tx_q->tx_skbuff_dma[entry].last_segment = true;
2479                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2480
2481                 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2482
2483                 tx_q->tx_count_frames++;
2484
2485                 if (!priv->tx_coal_frames[queue])
2486                         set_ic = false;
2487                 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2488                         set_ic = true;
2489                 else
2490                         set_ic = false;
2491
2492                 if (set_ic) {
2493                         tx_q->tx_count_frames = 0;
2494                         stmmac_set_tx_ic(priv, tx_desc);
2495                         priv->xstats.tx_set_ic_bit++;
2496                 }
2497
2498                 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2499                                        true, priv->mode, true, true,
2500                                        xdp_desc.len);
2501
2502                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2503
2504                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2505                 entry = tx_q->cur_tx;
2506         }
2507
2508         if (tx_desc) {
2509                 stmmac_flush_tx_descriptors(priv, queue);
2510                 xsk_tx_release(pool);
2511         }
2512
2513         /* Return true only if both of the following conditions are met:
2514          *  a) TX budget is still available
2515          *  b) work_done is true, i.e. the XSK TX desc peek came up empty
2516          *     (no more pending XSK TX frames to transmit)
2517          */
2518         return !!budget && work_done;
2519 }
2520
2521 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2522 {
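             /* While still in threshold mode, raise the threshold by 64 on
              * each error, giving up once it has grown past 256.
              */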
2523         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2524                 tc += 64;
2525
2526                 if (priv->plat->force_thresh_dma_mode)
2527                         stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2528                 else
2529                         stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2530                                                       chan);
2531
2532                 priv->xstats.threshold = tc;
2533         }
2534 }
2535
2536 /**
2537  * stmmac_tx_clean - to manage the transmission completion
2538  * @priv: driver private structure
2539  * @budget: napi budget limiting this function's packet handling
2540  * @queue: TX queue index
2541  * Description: it reclaims the transmit resources after transmission completes.
2542  */
2543 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2544 {
2545         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2546         unsigned int bytes_compl = 0, pkts_compl = 0;
2547         unsigned int entry, xmits = 0, count = 0;
2548
2549         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2550
2551         priv->xstats.tx_clean++;
2552
2553         tx_q->xsk_frames_done = 0;
2554
2555         entry = tx_q->dirty_tx;
2556
2557         /* Try to clean all completed TX frames in one shot */
2558         while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2559                 struct xdp_frame *xdpf;
2560                 struct sk_buff *skb;
2561                 struct dma_desc *p;
2562                 int status;
2563
2564                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2565                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2566                         xdpf = tx_q->xdpf[entry];
2567                         skb = NULL;
2568                 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2569                         xdpf = NULL;
2570                         skb = tx_q->tx_skbuff[entry];
2571                 } else {
2572                         xdpf = NULL;
2573                         skb = NULL;
2574                 }
2575
2576                 if (priv->extend_desc)
2577                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2578                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2579                         p = &tx_q->dma_entx[entry].basic;
2580                 else
2581                         p = tx_q->dma_tx + entry;
2582
2583                 status = stmmac_tx_status(priv, &priv->dev->stats,
2584                                 &priv->xstats, p, priv->ioaddr);
2585                 /* Check if the descriptor is owned by the DMA */
2586                 if (unlikely(status & tx_dma_own))
2587                         break;
2588
2589                 count++;
2590
2591                 /* Make sure descriptor fields are read after reading
2592                  * the own bit.
2593                  */
2594                 dma_rmb();
2595
2596                 /* Just consider the last segment and ... */
2597                 if (likely(!(status & tx_not_ls))) {
2598                         /* ... verify the status error condition */
2599                         if (unlikely(status & tx_err)) {
2600                                 priv->dev->stats.tx_errors++;
2601                                 if (unlikely(status & tx_err_bump_tc))
2602                                         stmmac_bump_dma_threshold(priv, queue);
2603                         } else {
2604                                 priv->dev->stats.tx_packets++;
2605                                 priv->xstats.tx_pkt_n++;
2606                                 priv->xstats.txq_stats[queue].tx_pkt_n++;
2607                         }
2608                         if (skb)
2609                                 stmmac_get_tx_hwtstamp(priv, p, skb);
2610                 }
2611
2612                 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2613                            tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2614                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2615                                 dma_unmap_page(priv->device,
2616                                                tx_q->tx_skbuff_dma[entry].buf,
2617                                                tx_q->tx_skbuff_dma[entry].len,
2618                                                DMA_TO_DEVICE);
2619                         else
2620                                 dma_unmap_single(priv->device,
2621                                                  tx_q->tx_skbuff_dma[entry].buf,
2622                                                  tx_q->tx_skbuff_dma[entry].len,
2623                                                  DMA_TO_DEVICE);
2624                         tx_q->tx_skbuff_dma[entry].buf = 0;
2625                         tx_q->tx_skbuff_dma[entry].len = 0;
2626                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2627                 }
2628
2629                 stmmac_clean_desc3(priv, tx_q, p);
2630
2631                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2632                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2633
2634                 if (xdpf &&
2635                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2636                         xdp_return_frame_rx_napi(xdpf);
2637                         tx_q->xdpf[entry] = NULL;
2638                 }
2639
2640                 if (xdpf &&
2641                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2642                         xdp_return_frame(xdpf);
2643                         tx_q->xdpf[entry] = NULL;
2644                 }
2645
2646                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2647                         tx_q->xsk_frames_done++;
2648
2649                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2650                         if (likely(skb)) {
2651                                 pkts_compl++;
2652                                 bytes_compl += skb->len;
2653                                 dev_consume_skb_any(skb);
2654                                 tx_q->tx_skbuff[entry] = NULL;
2655                         }
2656                 }
2657
2658                 stmmac_release_tx_desc(priv, p, priv->mode);
2659
2660                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2661         }
2662         tx_q->dirty_tx = entry;
2663
2664         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2665                                   pkts_compl, bytes_compl);
2666
2667         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2668                                                                 queue))) &&
2669             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2670
2671                 netif_dbg(priv, tx_done, priv->dev,
2672                           "%s: restart transmit\n", __func__);
2673                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2674         }
2675
2676         if (tx_q->xsk_pool) {
2677                 bool work_done;
2678
2679                 if (tx_q->xsk_frames_done)
2680                         xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2681
2682                 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2683                         xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2684
2685                 /* For XSK TX, we try to send as many frames as possible.
2686                  * If the XSK work is done (XSK TX desc empty and budget still
2687                  * available), return "budget - 1" to re-enable the TX IRQ.
2688                  * Else, return "budget" to make NAPI continue polling.
2689                  */
2690                 work_done = stmmac_xdp_xmit_zc(priv, queue,
2691                                                STMMAC_XSK_TX_BUDGET_MAX);
2692                 if (work_done)
2693                         xmits = budget - 1;
2694                 else
2695                         xmits = budget;
2696         }
2697
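             /* With software-timed EEE, try to re-enter low power idle by
              * re-arming the LPI timer now that TX work has been reclaimed.
              */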
2698         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2699             priv->eee_sw_timer_en) {
2700                 if (stmmac_enable_eee_mode(priv))
2701                         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2702         }
2703
2704         /* We still have pending packets, let's call for a new scheduling */
2705         if (tx_q->dirty_tx != tx_q->cur_tx)
2706                 hrtimer_start(&tx_q->txtimer,
2707                               STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2708                               HRTIMER_MODE_REL);
2709
2710         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2711
2712         /* Combine decisions from TX clean and XSK TX */
2713         return max(count, xmits);
2714 }
2715
2716 /**
2717  * stmmac_tx_err - to manage the tx error
2718  * @priv: driver private structure
2719  * @chan: channel index
2720  * Description: it cleans the descriptors and restarts the transmission
2721  * in case of transmission errors.
2722  */
2723 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2724 {
2725         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2726
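             /* Stop the queue and its DMA channel, drop every in-flight
              * buffer, then reinitialize the ring and restart DMA from a
              * clean state.
              */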
2727         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2728
2729         stmmac_stop_tx_dma(priv, chan);
2730         dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2731         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2732         stmmac_reset_tx_queue(priv, chan);
2733         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2734                             tx_q->dma_tx_phy, chan);
2735         stmmac_start_tx_dma(priv, chan);
2736
2737         priv->dev->stats.tx_errors++;
2738         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2739 }
2740
2741 /**
2742  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2743  *  @priv: driver private structure
2744  *  @txmode: TX operating mode
2745  *  @rxmode: RX operating mode
2746  *  @chan: channel index
2747  *  Description: it is used for configuring the DMA operation mode at
2748  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2749  *  mode.
2750  */
2751 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2752                                           u32 rxmode, u32 chan)
2753 {
2754         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2755         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2756         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2757         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2758         int rxfifosz = priv->plat->rx_fifo_size;
2759         int txfifosz = priv->plat->tx_fifo_size;
2760
2761         if (rxfifosz == 0)
2762                 rxfifosz = priv->dma_cap.rx_fifo_size;
2763         if (txfifosz == 0)
2764                 txfifosz = priv->dma_cap.tx_fifo_size;
2765
2766         /* Adjust for real per queue fifo size */
2767         rxfifosz /= rx_channels_count;
2768         txfifosz /= tx_channels_count;
2769
2770         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2771         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2772 }
2773
2774 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2775 {
2776         int ret;
2777
2778         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2779                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2780         if (ret && (ret != -EINVAL)) {
2781                 stmmac_global_err(priv);
2782                 return true;
2783         }
2784
2785         return false;
2786 }
2787
2788 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2789 {
2790         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2791                                                  &priv->xstats, chan, dir);
2792         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2793         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2794         struct stmmac_channel *ch = &priv->channel[chan];
2795         struct napi_struct *rx_napi;
2796         struct napi_struct *tx_napi;
2797         unsigned long flags;
2798
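        /* When an XSK pool is attached to this queue, both directions are
         * serviced by the combined rxtx NAPI instance.
         */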
2799         rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2800         tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2801
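        /* Mask the per-direction DMA interrupt under the channel lock before
         * scheduling NAPI; the poll routine re-enables it once its work is
         * done.
         */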
2802         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2803                 if (napi_schedule_prep(rx_napi)) {
2804                         spin_lock_irqsave(&ch->lock, flags);
2805                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2806                         spin_unlock_irqrestore(&ch->lock, flags);
2807                         __napi_schedule(rx_napi);
2808                 }
2809         }
2810
2811         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2812                 if (napi_schedule_prep(tx_napi)) {
2813                         spin_lock_irqsave(&ch->lock, flags);
2814                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2815                         spin_unlock_irqrestore(&ch->lock, flags);
2816                         __napi_schedule(tx_napi);
2817                 }
2818         }
2819
2820         return status;
2821 }
2822
2823 /**
2824  * stmmac_dma_interrupt - DMA ISR
2825  * @priv: driver private structure
2826  * Description: this is the DMA ISR. It is called by the main ISR.
2827  * It calls the dwmac DMA routine and schedules the poll method in case
2828  * some work can be done.
2829  */
2830 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2831 {
2832         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2833         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2834         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2835                                 tx_channel_count : rx_channel_count;
2836         u32 chan;
2837         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2838
2839         /* Make sure we never check beyond our status buffer. */
2840         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2841                 channels_to_check = ARRAY_SIZE(status);
2842
2843         for (chan = 0; chan < channels_to_check; chan++)
2844                 status[chan] = stmmac_napi_check(priv, chan,
2845                                                  DMA_DIR_RXTX);
2846
2847         for (chan = 0; chan < tx_channel_count; chan++) {
2848                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2849                         /* Try to bump up the dma threshold on this failure */
2850                         stmmac_bump_dma_threshold(priv, chan);
2851                 } else if (unlikely(status[chan] == tx_hard_error)) {
2852                         stmmac_tx_err(priv, chan);
2853                 }
2854         }
2855 }
2856
2857 /**
2858  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2859  * @priv: driver private structure
2860  * Description: this masks the MMC irq since the counters are managed in SW.
2861  */
2862 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2863 {
2864         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2865                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2866
2867         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2868
2869         if (priv->dma_cap.rmon) {
2870                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2871                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2872         } else
2873                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2874 }
2875
2876 /**
2877  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2878  * @priv: driver private structure
2879  * Description:
2880  *  newer GMAC chip generations have a register to indicate the
2881  *  presence of optional features/functions.
2882  *  This can also be used to override the values passed through the
2883  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2884  */
2885 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2886 {
2887         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2888 }
2889
2890 /**
2891  * stmmac_check_ether_addr - check if the MAC addr is valid
2892  * @priv: driver private structure
2893  * Description:
2894  * it verifies that the MAC address is valid; in case of failure it
2895  * generates a random MAC address
2896  */
2897 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2898 {
2899         u8 addr[ETH_ALEN];
2900
2901         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2902                 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2903                 if (is_valid_ether_addr(addr))
2904                         eth_hw_addr_set(priv->dev, addr);
2905                 else
2906                         eth_hw_addr_random(priv->dev);
2907                 dev_info(priv->device, "device MAC address %pM\n",
2908                          priv->dev->dev_addr);
2909         }
2910 }
2911
2912 /**
2913  * stmmac_init_dma_engine - DMA init.
2914  * @priv: driver private structure
2915  * Description:
2916  * It inits the DMA invoking the specific MAC/GMAC callback.
2917  * Some DMA parameters can be passed from the platform;
2918  * in case these are not passed, a default is kept for the MAC or GMAC.
2919  */
2920 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2921 {
2922         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2923         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2924         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2925         struct stmmac_rx_queue *rx_q;
2926         struct stmmac_tx_queue *tx_q;
2927         u32 chan = 0;
2928         int atds = 0;
2929         int ret = 0;
2930
2931         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2932                 dev_err(priv->device, "Invalid DMA configuration\n");
2933                 return -EINVAL;
2934         }
2935
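        /* Extended descriptors in ring mode need the Alternate Descriptor
         * Size (ATDS) setting so the DMA fetches the larger descriptor
         * layout.
         */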
2936         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2937                 atds = 1;
2938
2939         ret = stmmac_reset(priv, priv->ioaddr);
2940         if (ret) {
2941                 dev_err(priv->device, "Failed to reset the dma\n");
2942                 return ret;
2943         }
2944
2945         /* DMA Configuration */
2946         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2947
2948         if (priv->plat->axi)
2949                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2950
2951         /* DMA CSR Channel configuration */
2952         for (chan = 0; chan < dma_csr_ch; chan++) {
2953                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2954                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2955         }
2956
2957         /* DMA RX Channel Configuration */
2958         for (chan = 0; chan < rx_channels_count; chan++) {
2959                 rx_q = &priv->dma_conf.rx_queue[chan];
2960
2961                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2962                                     rx_q->dma_rx_phy, chan);
2963
2964                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2965                                      (rx_q->buf_alloc_num *
2966                                       sizeof(struct dma_desc));
2967                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2968                                        rx_q->rx_tail_addr, chan);
2969         }
2970
2971         /* DMA TX Channel Configuration */
2972         for (chan = 0; chan < tx_channels_count; chan++) {
2973                 tx_q = &priv->dma_conf.tx_queue[chan];
2974
2975                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2976                                     tx_q->dma_tx_phy, chan);
2977
2978                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2979                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2980                                        tx_q->tx_tail_addr, chan);
2981         }
2982
2983         return ret;
2984 }
2985
2986 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2987 {
2988         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2989
2990         hrtimer_start(&tx_q->txtimer,
2991                       STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2992                       HRTIMER_MODE_REL);
2993 }
2994
2995 /**
2996  * stmmac_tx_timer - mitigation sw timer for tx.
2997  * @t: pointer to the hrtimer that expired
2998  * Description:
2999  * This is the timer handler that schedules the TX NAPI poll, which runs stmmac_tx_clean.
3000  */
3001 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3002 {
3003         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3004         struct stmmac_priv *priv = tx_q->priv_data;
3005         struct stmmac_channel *ch;
3006         struct napi_struct *napi;
3007
3008         ch = &priv->channel[tx_q->queue_index];
3009         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3010
3011         if (likely(napi_schedule_prep(napi))) {
3012                 unsigned long flags;
3013
3014                 spin_lock_irqsave(&ch->lock, flags);
3015                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3016                 spin_unlock_irqrestore(&ch->lock, flags);
3017                 __napi_schedule(napi);
3018         }
3019
3020         return HRTIMER_NORESTART;
3021 }
3022
3023 /**
3024  * stmmac_init_coalesce - init mitigation options.
3025  * @priv: driver private structure
3026  * Description:
3027  * This inits the coalesce parameters: i.e. timer rate,
3028  * timer handler and default threshold used for enabling the
3029  * interrupt on completion bit.
3030  */
3031 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3032 {
3033         u32 tx_channel_count = priv->plat->tx_queues_to_use;
3034         u32 rx_channel_count = priv->plat->rx_queues_to_use;
3035         u32 chan;
3036
3037         for (chan = 0; chan < tx_channel_count; chan++) {
3038                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3039
3040                 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3041                 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3042
3043                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3044                 tx_q->txtimer.function = stmmac_tx_timer;
3045         }
3046
3047         for (chan = 0; chan < rx_channel_count; chan++)
3048                 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3049 }
3050
3051 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3052 {
3053         u32 rx_channels_count = priv->plat->rx_queues_to_use;
3054         u32 tx_channels_count = priv->plat->tx_queues_to_use;
3055         u32 chan;
3056
3057         /* set TX ring length */
3058         for (chan = 0; chan < tx_channels_count; chan++)
3059                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3060                                        (priv->dma_conf.dma_tx_size - 1), chan);
3061
3062         /* set RX ring length */
3063         for (chan = 0; chan < rx_channels_count; chan++)
3064                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3065                                        (priv->dma_conf.dma_rx_size - 1), chan);
3066 }
3067
3068 /**
3069  *  stmmac_set_tx_queue_weight - Set TX queue weight
3070  *  @priv: driver private structure
3071  *  Description: It is used for setting the TX queue weights
3072  */
3073 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3074 {
3075         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3076         u32 weight;
3077         u32 queue;
3078
3079         for (queue = 0; queue < tx_queues_count; queue++) {
3080                 weight = priv->plat->tx_queues_cfg[queue].weight;
3081                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3082         }
3083 }
3084
3085 /**
3086  *  stmmac_configure_cbs - Configure CBS in TX queue
3087  *  @priv: driver private structure
3088  *  Description: It is used for configuring CBS in AVB TX queues
3089  */
3090 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3091 {
3092         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3093         u32 mode_to_use;
3094         u32 queue;
3095
3096         /* queue 0 is reserved for legacy traffic */
3097         for (queue = 1; queue < tx_queues_count; queue++) {
3098                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3099                 if (mode_to_use == MTL_QUEUE_DCB)
3100                         continue;
3101
3102                 stmmac_config_cbs(priv, priv->hw,
3103                                 priv->plat->tx_queues_cfg[queue].send_slope,
3104                                 priv->plat->tx_queues_cfg[queue].idle_slope,
3105                                 priv->plat->tx_queues_cfg[queue].high_credit,
3106                                 priv->plat->tx_queues_cfg[queue].low_credit,
3107                                 queue);
3108         }
3109 }
3110
3111 /**
3112  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3113  *  @priv: driver private structure
3114  *  Description: It is used for mapping RX queues to RX dma channels
3115  */
3116 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3117 {
3118         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3119         u32 queue;
3120         u32 chan;
3121
3122         for (queue = 0; queue < rx_queues_count; queue++) {
3123                 chan = priv->plat->rx_queues_cfg[queue].chan;
3124                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3125         }
3126 }
3127
3128 /**
3129  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3130  *  @priv: driver private structure
3131  *  Description: It is used for configuring the RX Queue Priority
3132  */
3133 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3134 {
3135         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3136         u32 queue;
3137         u32 prio;
3138
3139         for (queue = 0; queue < rx_queues_count; queue++) {
3140                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3141                         continue;
3142
3143                 prio = priv->plat->rx_queues_cfg[queue].prio;
3144                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3145         }
3146 }
3147
3148 /**
3149  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3150  *  @priv: driver private structure
3151  *  Description: It is used for configuring the TX Queue Priority
3152  */
3153 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3154 {
3155         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3156         u32 queue;
3157         u32 prio;
3158
3159         for (queue = 0; queue < tx_queues_count; queue++) {
3160                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3161                         continue;
3162
3163                 prio = priv->plat->tx_queues_cfg[queue].prio;
3164                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3165         }
3166 }
3167
3168 /**
3169  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3170  *  @priv: driver private structure
3171  *  Description: It is used for configuring the RX queue routing
3172  */
3173 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3174 {
3175         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3176         u32 queue;
3177         u8 packet;
3178
3179         for (queue = 0; queue < rx_queues_count; queue++) {
3180                 /* no specific packet type routing specified for the queue */
3181                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3182                         continue;
3183
3184                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3185                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3186         }
3187 }
3188
3189 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3190 {
3191         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3192                 priv->rss.enable = false;
3193                 return;
3194         }
3195
3196         if (priv->dev->features & NETIF_F_RXHASH)
3197                 priv->rss.enable = true;
3198         else
3199                 priv->rss.enable = false;
3200
3201         stmmac_rss_configure(priv, priv->hw, &priv->rss,
3202                              priv->plat->rx_queues_to_use);
3203 }
3204
3205 /**
3206  *  stmmac_mtl_configuration - Configure MTL
3207  *  @priv: driver private structure
3208  *  Description: It is used for configuring MTL
3209  */
3210 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3211 {
3212         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3213         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3214
3215         if (tx_queues_count > 1)
3216                 stmmac_set_tx_queue_weight(priv);
3217
3218         /* Configure MTL RX algorithms */
3219         if (rx_queues_count > 1)
3220                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3221                                 priv->plat->rx_sched_algorithm);
3222
3223         /* Configure MTL TX algorithms */
3224         if (tx_queues_count > 1)
3225                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3226                                 priv->plat->tx_sched_algorithm);
3227
3228         /* Configure CBS in AVB TX queues */
3229         if (tx_queues_count > 1)
3230                 stmmac_configure_cbs(priv);
3231
3232         /* Map RX MTL to DMA channels */
3233         stmmac_rx_queue_dma_chan_map(priv);
3234
3235         /* Enable MAC RX Queues */
3236         stmmac_mac_enable_rx_queues(priv);
3237
3238         /* Set RX priorities */
3239         if (rx_queues_count > 1)
3240                 stmmac_mac_config_rx_queues_prio(priv);
3241
3242         /* Set TX priorities */
3243         if (tx_queues_count > 1)
3244                 stmmac_mac_config_tx_queues_prio(priv);
3245
3246         /* Set RX routing */
3247         if (rx_queues_count > 1)
3248                 stmmac_mac_config_rx_queues_routing(priv);
3249
3250         /* Receive Side Scaling */
3251         if (rx_queues_count > 1)
3252                 stmmac_mac_config_rss(priv);
3253 }
3254
3255 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3256 {
3257         if (priv->dma_cap.asp) {
3258                 netdev_info(priv->dev, "Enabling Safety Features\n");
3259                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3260                                           priv->plat->safety_feat_cfg);
3261         } else {
3262                 netdev_info(priv->dev, "No Safety Features support found\n");
3263         }
3264 }
3265
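/* Create the single-threaded workqueue used to run the Frame Preemption (FPE)
 * handshake work outside of interrupt context.
 */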
3266 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3267 {
3268         char *name;
3269
3270         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3271         clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3272
3273         name = priv->wq_name;
3274         sprintf(name, "%s-fpe", priv->dev->name);
3275
3276         priv->fpe_wq = create_singlethread_workqueue(name);
3277         if (!priv->fpe_wq) {
3278                 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3279
3280                 return -ENOMEM;
3281         }
3282         netdev_info(priv->dev, "FPE workqueue start\n");
3283
3284         return 0;
3285 }
3286
3287 /**
3288  * stmmac_hw_setup - setup mac in a usable state.
3289  *  @dev : pointer to the device structure.
3290  *  @ptp_register: register PTP if set
3291  *  Description:
3292  *  this is the main function to set up the HW in a usable state: the
3293  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3294  *  checksum features, timers) and the DMA is ready to start receiving
3295  *  and transmitting.
3296  *  Return value:
3297  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3298  *  file on failure.
3299  */
3300 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3301 {
3302         struct stmmac_priv *priv = netdev_priv(dev);
3303         u32 rx_cnt = priv->plat->rx_queues_to_use;
3304         u32 tx_cnt = priv->plat->tx_queues_to_use;
3305         bool sph_en;
3306         u32 chan;
3307         int ret;
3308
3309         /* DMA initialization and SW reset */
3310         ret = stmmac_init_dma_engine(priv);
3311         if (ret < 0) {
3312                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3313                            __func__);
3314                 return ret;
3315         }
3316
3317         /* Copy the MAC addr into the HW  */
3318         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3319
3320         /* PS and related bits will be programmed according to the speed */
3321         if (priv->hw->pcs) {
3322                 int speed = priv->plat->mac_port_sel_speed;
3323
3324                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3325                     (speed == SPEED_1000)) {
3326                         priv->hw->ps = speed;
3327                 } else {
3328                         dev_warn(priv->device, "invalid port speed\n");
3329                         priv->hw->ps = 0;
3330                 }
3331         }
3332
3333         /* Initialize the MAC Core */
3334         stmmac_core_init(priv, priv->hw, dev);
3335
3336         /* Initialize MTL */
3337         stmmac_mtl_configuration(priv);
3338
3339         /* Initialize Safety Features */
3340         stmmac_safety_feat_configuration(priv);
3341
3342         ret = stmmac_rx_ipc(priv, priv->hw);
3343         if (!ret) {
3344                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3345                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3346                 priv->hw->rx_csum = 0;
3347         }
3348
3349         /* Enable the MAC Rx/Tx */
3350         stmmac_mac_set(priv, priv->ioaddr, true);
3351
3352         /* Set the HW DMA mode and the COE */
3353         stmmac_dma_operation_mode(priv);
3354
3355         stmmac_mmc_setup(priv);
3356
3357         if (ptp_register) {
3358                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3359                 if (ret < 0)
3360                         netdev_warn(priv->dev,
3361                                     "failed to enable PTP reference clock: %pe\n",
3362                                     ERR_PTR(ret));
3363         }
3364
3365         ret = stmmac_init_ptp(priv);
3366         if (ret == -EOPNOTSUPP)
3367                 netdev_info(priv->dev, "PTP not supported by HW\n");
3368         else if (ret)
3369                 netdev_warn(priv->dev, "PTP init failed\n");
3370         else if (ptp_register)
3371                 stmmac_ptp_register(priv);
3372
3373         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3374
3375         /* Convert the timer from msec to usec */
3376         if (!priv->tx_lpi_timer)
3377                 priv->tx_lpi_timer = eee_timer * 1000;
3378
3379         if (priv->use_riwt) {
3380                 u32 queue;
3381
3382                 for (queue = 0; queue < rx_cnt; queue++) {
3383                         if (!priv->rx_riwt[queue])
3384                                 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3385
3386                         stmmac_rx_watchdog(priv, priv->ioaddr,
3387                                            priv->rx_riwt[queue], queue);
3388                 }
3389         }
3390
3391         if (priv->hw->pcs)
3392                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3393
3394         /* set TX and RX rings length */
3395         stmmac_set_rings_length(priv);
3396
3397         /* Enable TSO */
3398         if (priv->tso) {
3399                 for (chan = 0; chan < tx_cnt; chan++) {
3400                         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3401
3402                         /* TSO and TBS cannot co-exist */
3403                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3404                                 continue;
3405
3406                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3407                 }
3408         }
3409
3410         /* Enable Split Header */
3411         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3412         for (chan = 0; chan < rx_cnt; chan++)
3413                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3414
3415
3416         /* VLAN Tag Insertion */
3417         if (priv->dma_cap.vlins)
3418                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3419
3420         /* TBS */
3421         for (chan = 0; chan < tx_cnt; chan++) {
3422                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3423                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3424
3425                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3426         }
3427
3428         /* Configure real RX and TX queues */
3429         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3430         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3431
3432         /* Start the ball rolling... */
3433         stmmac_start_all_dma(priv);
3434
3435         if (priv->dma_cap.fpesel) {
3436                 stmmac_fpe_start_wq(priv);
3437
3438                 if (priv->plat->fpe_cfg->enable)
3439                         stmmac_fpe_handshake(priv, true);
3440         }
3441
3442         return 0;
3443 }
3444
3445 static void stmmac_hw_teardown(struct net_device *dev)
3446 {
3447         struct stmmac_priv *priv = netdev_priv(dev);
3448
3449         clk_disable_unprepare(priv->plat->clk_ptp_ref);
3450 }
3451
3452 static void stmmac_free_irq(struct net_device *dev,
3453                             enum request_irq_err irq_err, int irq_idx)
3454 {
3455         struct stmmac_priv *priv = netdev_priv(dev);
3456         int j;
3457
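        /* The cases below intentionally fall through: starting from the last
         * successfully requested IRQ, every previously requested line is
         * released in reverse order.
         */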
3458         switch (irq_err) {
3459         case REQ_IRQ_ERR_ALL:
3460                 irq_idx = priv->plat->tx_queues_to_use;
3461                 fallthrough;
3462         case REQ_IRQ_ERR_TX:
3463                 for (j = irq_idx - 1; j >= 0; j--) {
3464                         if (priv->tx_irq[j] > 0) {
3465                                 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3466                                 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3467                         }
3468                 }
3469                 irq_idx = priv->plat->rx_queues_to_use;
3470                 fallthrough;
3471         case REQ_IRQ_ERR_RX:
3472                 for (j = irq_idx - 1; j >= 0; j--) {
3473                         if (priv->rx_irq[j] > 0) {
3474                                 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3475                                 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3476                         }
3477                 }
3478
3479                 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3480                         free_irq(priv->sfty_ue_irq, dev);
3481                 fallthrough;
3482         case REQ_IRQ_ERR_SFTY_UE:
3483                 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3484                         free_irq(priv->sfty_ce_irq, dev);
3485                 fallthrough;
3486         case REQ_IRQ_ERR_SFTY_CE:
3487                 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3488                         free_irq(priv->lpi_irq, dev);
3489                 fallthrough;
3490         case REQ_IRQ_ERR_LPI:
3491                 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3492                         free_irq(priv->wol_irq, dev);
3493                 fallthrough;
3494         case REQ_IRQ_ERR_WOL:
3495                 free_irq(dev->irq, dev);
3496                 fallthrough;
3497         case REQ_IRQ_ERR_MAC:
3498         case REQ_IRQ_ERR_NO:
3499                 /* If the MAC IRQ request failed, there are no more IRQs to free */
3500                 break;
3501         }
3502 }
3503
3504 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3505 {
3506         struct stmmac_priv *priv = netdev_priv(dev);
3507         enum request_irq_err irq_err;
3508         cpumask_t cpu_mask;
3509         int irq_idx = 0;
3510         char *int_name;
3511         int ret;
3512         int i;
3513
3514         /* For common interrupt */
3515         int_name = priv->int_name_mac;
3516         sprintf(int_name, "%s:%s", dev->name, "mac");
3517         ret = request_irq(dev->irq, stmmac_mac_interrupt,
3518                           0, int_name, dev);
3519         if (unlikely(ret < 0)) {
3520                 netdev_err(priv->dev,
3521                            "%s: alloc mac MSI %d (error: %d)\n",
3522                            __func__, dev->irq, ret);
3523                 irq_err = REQ_IRQ_ERR_MAC;
3524                 goto irq_error;
3525         }
3526
3527         /* Request the Wake IRQ in case another line
3528          * is used for WoL
3529          */
3530         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3531                 int_name = priv->int_name_wol;
3532                 sprintf(int_name, "%s:%s", dev->name, "wol");
3533                 ret = request_irq(priv->wol_irq,
3534                                   stmmac_mac_interrupt,
3535                                   0, int_name, dev);
3536                 if (unlikely(ret < 0)) {
3537                         netdev_err(priv->dev,
3538                                    "%s: alloc wol MSI %d (error: %d)\n",
3539                                    __func__, priv->wol_irq, ret);
3540                         irq_err = REQ_IRQ_ERR_WOL;
3541                         goto irq_error;
3542                 }
3543         }
3544
3545         /* Request the LPI IRQ in case another line
3546          * is used for LPI
3547          */
3548         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3549                 int_name = priv->int_name_lpi;
3550                 sprintf(int_name, "%s:%s", dev->name, "lpi");
3551                 ret = request_irq(priv->lpi_irq,
3552                                   stmmac_mac_interrupt,
3553                                   0, int_name, dev);
3554                 if (unlikely(ret < 0)) {
3555                         netdev_err(priv->dev,
3556                                    "%s: alloc lpi MSI %d (error: %d)\n",
3557                                    __func__, priv->lpi_irq, ret);
3558                         irq_err = REQ_IRQ_ERR_LPI;
3559                         goto irq_error;
3560                 }
3561         }
3562
3563         /* Request the Safety Feature Correctable Error line in
3564          * case another line is used
3565          */
3566         if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3567                 int_name = priv->int_name_sfty_ce;
3568                 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3569                 ret = request_irq(priv->sfty_ce_irq,
3570                                   stmmac_safety_interrupt,
3571                                   0, int_name, dev);
3572                 if (unlikely(ret < 0)) {
3573                         netdev_err(priv->dev,
3574                                    "%s: alloc sfty ce MSI %d (error: %d)\n",
3575                                    __func__, priv->sfty_ce_irq, ret);
3576                         irq_err = REQ_IRQ_ERR_SFTY_CE;
3577                         goto irq_error;
3578                 }
3579         }
3580
3581         /* Request the Safety Feature Uncorrectable Error line in
3582          * case another line is used
3583          */
3584         if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3585                 int_name = priv->int_name_sfty_ue;
3586                 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3587                 ret = request_irq(priv->sfty_ue_irq,
3588                                   stmmac_safety_interrupt,
3589                                   0, int_name, dev);
3590                 if (unlikely(ret < 0)) {
3591                         netdev_err(priv->dev,
3592                                    "%s: alloc sfty ue MSI %d (error: %d)\n",
3593                                    __func__, priv->sfty_ue_irq, ret);
3594                         irq_err = REQ_IRQ_ERR_SFTY_UE;
3595                         goto irq_error;
3596                 }
3597         }
3598
3599         /* Request Rx MSI irq */
3600         for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3601                 if (i >= MTL_MAX_RX_QUEUES)
3602                         break;
3603                 if (priv->rx_irq[i] == 0)
3604                         continue;
3605
3606                 int_name = priv->int_name_rx_irq[i];
3607                 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3608                 ret = request_irq(priv->rx_irq[i],
3609                                   stmmac_msi_intr_rx,
3610                                   0, int_name, &priv->dma_conf.rx_queue[i]);
3611                 if (unlikely(ret < 0)) {
3612                         netdev_err(priv->dev,
3613                                    "%s: alloc rx-%d  MSI %d (error: %d)\n",
3614                                    __func__, i, priv->rx_irq[i], ret);
3615                         irq_err = REQ_IRQ_ERR_RX;
3616                         irq_idx = i;
3617                         goto irq_error;
3618                 }
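                /* Give an affinity hint so RX vectors are spread across the
                 * online CPUs in a round-robin fashion.
                 */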
3619                 cpumask_clear(&cpu_mask);
3620                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3621                 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3622         }
3623
3624         /* Request Tx MSI irq */
3625         for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3626                 if (i >= MTL_MAX_TX_QUEUES)
3627                         break;
3628                 if (priv->tx_irq[i] == 0)
3629                         continue;
3630
3631                 int_name = priv->int_name_tx_irq[i];
3632                 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3633                 ret = request_irq(priv->tx_irq[i],
3634                                   stmmac_msi_intr_tx,
3635                                   0, int_name, &priv->dma_conf.tx_queue[i]);
3636                 if (unlikely(ret < 0)) {
3637                         netdev_err(priv->dev,
3638                                    "%s: alloc tx-%d  MSI %d (error: %d)\n",
3639                                    __func__, i, priv->tx_irq[i], ret);
3640                         irq_err = REQ_IRQ_ERR_TX;
3641                         irq_idx = i;
3642                         goto irq_error;
3643                 }
3644                 cpumask_clear(&cpu_mask);
3645                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3646                 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3647         }
3648
3649         return 0;
3650
3651 irq_error:
3652         stmmac_free_irq(dev, irq_err, irq_idx);
3653         return ret;
3654 }
3655
3656 static int stmmac_request_irq_single(struct net_device *dev)
3657 {
3658         struct stmmac_priv *priv = netdev_priv(dev);
3659         enum request_irq_err irq_err;
3660         int ret;
3661
3662         ret = request_irq(dev->irq, stmmac_interrupt,
3663                           IRQF_SHARED, dev->name, dev);
3664         if (unlikely(ret < 0)) {
3665                 netdev_err(priv->dev,
3666                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3667                            __func__, dev->irq, ret);
3668                 irq_err = REQ_IRQ_ERR_MAC;
3669                 goto irq_error;
3670         }
3671
3672         /* Request the Wake IRQ in case another line
3673          * is used for WoL
3674          */
3675         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3676                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3677                                   IRQF_SHARED, dev->name, dev);
3678                 if (unlikely(ret < 0)) {
3679                         netdev_err(priv->dev,
3680                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3681                                    __func__, priv->wol_irq, ret);
3682                         irq_err = REQ_IRQ_ERR_WOL;
3683                         goto irq_error;
3684                 }
3685         }
3686
3687         /* Request the LPI IRQ in case another line is used for LPI */
3688         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3689                 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3690                                   IRQF_SHARED, dev->name, dev);
3691                 if (unlikely(ret < 0)) {
3692                         netdev_err(priv->dev,
3693                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3694                                    __func__, priv->lpi_irq, ret);
3695                         irq_err = REQ_IRQ_ERR_LPI;
3696                         goto irq_error;
3697                 }
3698         }
3699
3700         return 0;
3701
3702 irq_error:
3703         stmmac_free_irq(dev, irq_err, 0);
3704         return ret;
3705 }
3706
3707 static int stmmac_request_irq(struct net_device *dev)
3708 {
3709         struct stmmac_priv *priv = netdev_priv(dev);
3710         int ret;
3711
3712         /* Request the IRQ lines */
3713         if (priv->plat->multi_msi_en)
3714                 ret = stmmac_request_irq_multi_msi(dev);
3715         else
3716                 ret = stmmac_request_irq_single(dev);
3717
3718         return ret;
3719 }
3720
3721 /**
3722  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3723  *  @priv: driver private structure
3724  *  @mtu: MTU to setup the dma queue and buf with
3725  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3726  *  Allocate the Tx/Rx DMA queues and init them.
3727  *  Return value:
3728  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3729  */
3730 static struct stmmac_dma_conf *
3731 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3732 {
3733         struct stmmac_dma_conf *dma_conf;
3734         int chan, bfsize, ret;
3735
3736         dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3737         if (!dma_conf) {
3738                 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3739                            __func__);
3740                 return ERR_PTR(-ENOMEM);
3741         }
3742
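        /* Prefer a 16KiB buffer when the MTU and the descriptors allow it;
         * otherwise derive the buffer size from the MTU.
         */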
3743         bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3744         if (bfsize < 0)
3745                 bfsize = 0;
3746
3747         if (bfsize < BUF_SIZE_16KiB)
3748                 bfsize = stmmac_set_bfsize(mtu, 0);
3749
3750         dma_conf->dma_buf_sz = bfsize;
3751         /* Choose the tx/rx size from the one already defined in the
3752          * priv struct (if defined).
3753          */
3754         dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3755         dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3756
3757         if (!dma_conf->dma_tx_size)
3758                 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3759         if (!dma_conf->dma_rx_size)
3760                 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3761
3762         /* Earlier check for TBS */
3763         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3764                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3765                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3766
3767                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3768                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3769         }
3770
3771         ret = alloc_dma_desc_resources(priv, dma_conf);
3772         if (ret < 0) {
3773                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3774                            __func__);
3775                 goto alloc_error;
3776         }
3777
3778         ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3779         if (ret < 0) {
3780                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3781                            __func__);
3782                 goto init_error;
3783         }
3784
3785         return dma_conf;
3786
3787 init_error:
3788         free_dma_desc_resources(priv, dma_conf);
3789 alloc_error:
3790         kfree(dma_conf);
3791         return ERR_PTR(ret);
3792 }
3793
3794 /**
3795  *  __stmmac_open - open entry point of the driver
3796  *  @dev : pointer to the device structure.
3797  *  @dma_conf :  structure to take the dma data
3798  *  Description:
3799  *  This function is the open entry point of the driver.
3800  *  Return value:
3801  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3802  *  file on failure.
3803  */
3804 static int __stmmac_open(struct net_device *dev,
3805                          struct stmmac_dma_conf *dma_conf)
3806 {
3807         struct stmmac_priv *priv = netdev_priv(dev);
3808         int mode = priv->plat->phy_interface;
3809         u32 chan;
3810         int ret;
3811
3812         ret = pm_runtime_resume_and_get(priv->device);
3813         if (ret < 0)
3814                 return ret;
3815
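        /* Attach a PHY via phylink only when the link is not entirely handled
         * by a PCS (TBI/RTBI, an XPCS in C73 mode, or a Lynx PCS).
         */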
3816         if (priv->hw->pcs != STMMAC_PCS_TBI &&
3817             priv->hw->pcs != STMMAC_PCS_RTBI &&
3818             (!priv->hw->xpcs ||
3819              xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3820             !priv->hw->lynx_pcs) {
3821                 ret = stmmac_init_phy(dev);
3822                 if (ret) {
3823                         netdev_err(priv->dev,
3824                                    "%s: Cannot attach to PHY (error: %d)\n",
3825                                    __func__, ret);
3826                         goto init_phy_error;
3827                 }
3828         }
3829
3830         /* Extra statistics */
3831         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3832         priv->xstats.threshold = tc;
3833
3834         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3835
3836         buf_sz = dma_conf->dma_buf_sz;
3837         memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3838
3839         stmmac_reset_queues_param(priv);
3840
3841         if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
3842                 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3843                 if (ret < 0) {
3844                         netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3845                                    __func__);
3846                         goto init_error;
3847                 }
3848         }
3849
3850         ret = stmmac_hw_setup(dev, true);
3851         if (ret < 0) {
3852                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3853                 goto init_error;
3854         }
3855
3856         stmmac_init_coalesce(priv);
3857
3858         phylink_start(priv->phylink);
3859         /* We may have called phylink_speed_down before */
3860         phylink_speed_up(priv->phylink);
3861
3862         ret = stmmac_request_irq(dev);
3863         if (ret)
3864                 goto irq_error;
3865
3866         stmmac_enable_all_queues(priv);
3867         netif_tx_start_all_queues(priv->dev);
3868         stmmac_enable_all_dma_irq(priv);
3869
3870         return 0;
3871
3872 irq_error:
3873         phylink_stop(priv->phylink);
3874
3875         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3876                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3877
3878         stmmac_hw_teardown(dev);
3879 init_error:
3880         phylink_disconnect_phy(priv->phylink);
3881 init_phy_error:
3882         pm_runtime_put(priv->device);
3883         return ret;
3884 }
3885
3886 static int stmmac_open(struct net_device *dev)
3887 {
3888         struct stmmac_priv *priv = netdev_priv(dev);
3889         struct stmmac_dma_conf *dma_conf;
3890         int ret;
3891
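        /* __stmmac_open() copies the generated dma_conf into priv->dma_conf,
         * so the temporary container is always freed here; on error the
         * freshly allocated rings are released first.
         */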
3892         dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3893         if (IS_ERR(dma_conf))
3894                 return PTR_ERR(dma_conf);
3895
3896         ret = __stmmac_open(dev, dma_conf);
3897         if (ret)
3898                 free_dma_desc_resources(priv, dma_conf);
3899
3900         kfree(dma_conf);
3901         return ret;
3902 }
3903
3904 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3905 {
3906         set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3907
3908         if (priv->fpe_wq)
3909                 destroy_workqueue(priv->fpe_wq);
3910
3911         netdev_info(priv->dev, "FPE workqueue stop\n");
3912 }
3913
3914 /**
3915  *  stmmac_release - close entry point of the driver
3916  *  @dev : device pointer.
3917  *  Description:
3918  *  This is the stop entry point of the driver.
3919  */
3920 static int stmmac_release(struct net_device *dev)
3921 {
3922         struct stmmac_priv *priv = netdev_priv(dev);
3923         u32 chan;
3924
3925         if (device_may_wakeup(priv->device))
3926                 phylink_speed_down(priv->phylink, false);
3927         /* Stop and disconnect the PHY */
3928         phylink_stop(priv->phylink);
3929         phylink_disconnect_phy(priv->phylink);
3930
3931         stmmac_disable_all_queues(priv);
3932
3933         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3934                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3935
3936         netif_tx_disable(dev);
3937
3938         /* Free the IRQ lines */
3939         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3940
3941         if (priv->eee_enabled) {
3942                 priv->tx_path_in_lpi_mode = false;
3943                 del_timer_sync(&priv->eee_ctrl_timer);
3944         }
3945
3946         /* Stop TX/RX DMA and clear the descriptors */
3947         stmmac_stop_all_dma(priv);
3948
3949         /* Release and free the Rx/Tx resources */
3950         free_dma_desc_resources(priv, &priv->dma_conf);
3951
3952         /* Disable the MAC Rx/Tx */
3953         stmmac_mac_set(priv, priv->ioaddr, false);
3954
3955         /* Power down the SerDes if there is one */
3956         if (priv->plat->serdes_powerdown)
3957                 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3958
3959         netif_carrier_off(dev);
3960
3961         stmmac_release_ptp(priv);
3962
3963         pm_runtime_put(priv->device);
3964
3965         if (priv->dma_cap.fpesel)
3966                 stmmac_fpe_stop_wq(priv);
3967
3968         return 0;
3969 }
3970
3971 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3972                                struct stmmac_tx_queue *tx_q)
3973 {
3974         u16 tag = 0x0, inner_tag = 0x0;
3975         u32 inner_type = 0x0;
3976         struct dma_desc *p;
3977
3978         if (!priv->dma_cap.vlins)
3979                 return false;
3980         if (!skb_vlan_tag_present(skb))
3981                 return false;
3982         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3983                 inner_tag = skb_vlan_tag_get(skb);
3984                 inner_type = STMMAC_VLAN_INSERT;
3985         }
3986
3987         tag = skb_vlan_tag_get(skb);
3988
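        /* The tag is written into a context descriptor, consuming one ring
         * entry ahead of the frame's data descriptors.
         */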
3989         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3990                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3991         else
3992                 p = &tx_q->dma_tx[tx_q->cur_tx];
3993
3994         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3995                 return false;
3996
3997         stmmac_set_tx_owner(priv, p);
3998         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
3999         return true;
4000 }
4001
4002 /**
4003  *  stmmac_tso_allocator - allocate descriptors for the TSO payload
4004  *  @priv: driver private structure
4005  *  @des: buffer start address
4006  *  @total_len: total length to fill in descriptors
4007  *  @last_segment: condition for the last descriptor
4008  *  @queue: TX queue index
4009  *  Description:
4010  *  This function fills descriptors and requests new descriptors according to
4011  *  the buffer length to fill
4012  */
4013 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4014                                  int total_len, bool last_segment, u32 queue)
4015 {
4016         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4017         struct dma_desc *desc;
4018         u32 buff_size;
4019         int tmp_len;
4020
4021         tmp_len = total_len;
4022
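        /* Split the payload into TSO_MAX_BUFF_SIZE chunks, one descriptor
         * each; only the final chunk of the last segment sets the Last
         * Descriptor bit.
         */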
4023         while (tmp_len > 0) {
4024                 dma_addr_t curr_addr;
4025
4026                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4027                                                 priv->dma_conf.dma_tx_size);
4028                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4029
4030                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4031                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4032                 else
4033                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4034
4035                 curr_addr = des + (total_len - tmp_len);
4036                 if (priv->dma_cap.addr64 <= 32)
4037                         desc->des0 = cpu_to_le32(curr_addr);
4038                 else
4039                         stmmac_set_desc_addr(priv, desc, curr_addr);
4040
4041                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4042                             TSO_MAX_BUFF_SIZE : tmp_len;
4043
4044                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4045                                 0, 1,
4046                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4047                                 0, 0);
4048
4049                 tmp_len -= TSO_MAX_BUFF_SIZE;
4050         }
4051 }
4052
4053 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4054 {
4055         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4056         int desc_size;
4057
4058         if (likely(priv->extend_desc))
4059                 desc_size = sizeof(struct dma_extended_desc);
4060         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4061                 desc_size = sizeof(struct dma_edesc);
4062         else
4063                 desc_size = sizeof(struct dma_desc);
4064
4065         /* The own bit must be the latest setting done when preparing the
4066          * descriptor and then a barrier is needed to make sure that
4067          * all is coherent before granting ownership to the DMA engine.
4068          */
4069         wmb();
4070
4071         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4072         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4073 }
4074
4075 /**
4076  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4077  *  @skb : the socket buffer
4078  *  @dev : device pointer
4079  *  Description: this is the transmit function that is called on TSO frames
4080  *  (support available on GMAC4 and newer chips).
4081  *  Diagram below shows the ring programming in case of TSO frames:
4082  *
4083  *  First Descriptor
4084  *   --------
4085  *   | DES0 |---> buffer1 = L2/L3/L4 header
4086  *   | DES1 |---> TCP Payload (can continue on next descr...)
4087  *   | DES2 |---> buffer 1 and 2 len
4088  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4089  *   --------
4090  *      |
4091  *     ...
4092  *      |
4093  *   --------
4094  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4095  *   | DES1 | --|
4096  *   | DES2 | --> buffer 1 and 2 len
4097  *   | DES3 |
4098  *   --------
4099  *
4100  * MSS is fixed while TSO is enabled, so the TDES3 ctx field is only set when it changes.
4101  */
4102 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4103 {
4104         struct dma_desc *desc, *first, *mss_desc = NULL;
4105         struct stmmac_priv *priv = netdev_priv(dev);
4106         int nfrags = skb_shinfo(skb)->nr_frags;
4107         u32 queue = skb_get_queue_mapping(skb);
4108         unsigned int first_entry, tx_packets;
4109         int tmp_pay_len = 0, first_tx;
4110         struct stmmac_tx_queue *tx_q;
4111         bool has_vlan, set_ic;
4112         u8 proto_hdr_len, hdr;
4113         u32 pay_len, mss;
4114         dma_addr_t des;
4115         int i;
4116
4117         tx_q = &priv->dma_conf.tx_queue[queue];
4118         first_tx = tx_q->cur_tx;
4119
4120         /* Compute header lengths */
4121         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4122                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4123                 hdr = sizeof(struct udphdr);
4124         } else {
4125                 proto_hdr_len = skb_tcp_all_headers(skb);
4126                 hdr = tcp_hdrlen(skb);
4127         }
4128
4129         /* Desc availability based on threshold should be safe enough */
4130         if (unlikely(stmmac_tx_avail(priv, queue) <
4131                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4132                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4133                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4134                                                                 queue));
4135                         /* This is a hard error, log it. */
4136                         netdev_err(priv->dev,
4137                                    "%s: Tx Ring full when queue awake\n",
4138                                    __func__);
4139                 }
4140                 return NETDEV_TX_BUSY;
4141         }
4142
4143         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4144
4145         mss = skb_shinfo(skb)->gso_size;
4146
4147         /* set new MSS value if needed */
4148         if (mss != tx_q->mss) {
4149                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4150                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4151                 else
4152                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4153
4154                 stmmac_set_mss(priv, mss_desc, mss);
4155                 tx_q->mss = mss;
4156                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4157                                                 priv->dma_conf.dma_tx_size);
4158                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4159         }
4160
4161         if (netif_msg_tx_queued(priv)) {
4162                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4163                         __func__, hdr, proto_hdr_len, pay_len, mss);
4164                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4165                         skb->data_len);
4166         }
4167
4168         /* Check if VLAN can be inserted by HW */
4169         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4170
4171         first_entry = tx_q->cur_tx;
4172         WARN_ON(tx_q->tx_skbuff[first_entry]);
4173
4174         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4175                 desc = &tx_q->dma_entx[first_entry].basic;
4176         else
4177                 desc = &tx_q->dma_tx[first_entry];
4178         first = desc;
4179
4180         if (has_vlan)
4181                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4182
4183         /* first descriptor: fill Headers on Buf1 */
4184         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4185                              DMA_TO_DEVICE);
4186         if (dma_mapping_error(priv->device, des))
4187                 goto dma_map_err;
4188
4189         tx_q->tx_skbuff_dma[first_entry].buf = des;
4190         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4191         tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4192         tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4193
4194         if (priv->dma_cap.addr64 <= 32) {
4195                 first->des0 = cpu_to_le32(des);
4196
4197                 /* Fill start of payload in buff2 of first descriptor */
4198                 if (pay_len)
4199                         first->des1 = cpu_to_le32(des + proto_hdr_len);
4200
4201                 /* If needed take extra descriptors to fill the remaining payload */
4202                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4203         } else {
4204                 stmmac_set_desc_addr(priv, first, des);
4205                 tmp_pay_len = pay_len;
4206                 des += proto_hdr_len;
4207                 pay_len = 0;
4208         }
4209
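             /* Map the rest of the payload: stmmac_tso_allocator() spreads it
              * over additional descriptors of at most TSO_MAX_BUFF_SIZE bytes
              * each.
              */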
4210         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4211
4212         /* Prepare fragments */
4213         for (i = 0; i < nfrags; i++) {
4214                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4215
4216                 des = skb_frag_dma_map(priv->device, frag, 0,
4217                                        skb_frag_size(frag),
4218                                        DMA_TO_DEVICE);
4219                 if (dma_mapping_error(priv->device, des))
4220                         goto dma_map_err;
4221
4222                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4223                                      (i == nfrags - 1), queue);
4224
4225                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4226                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4227                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4228                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4229         }
4230
4231         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4232
4233         /* Only the last descriptor gets to point to the skb. */
4234         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4235         tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4236
4237         /* Manage tx mitigation */
4238         tx_packets = (tx_q->cur_tx + 1) - first_tx;
4239         tx_q->tx_count_frames += tx_packets;
4240
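             /* Decide whether to request a Tx completion interrupt (IC bit):
              * always for HW-timestamped frames, otherwise roughly once every
              * tx_coal_frames packets; the Tx coalescing timer covers the rest.
              */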
4241         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4242                 set_ic = true;
4243         else if (!priv->tx_coal_frames[queue])
4244                 set_ic = false;
4245         else if (tx_packets > priv->tx_coal_frames[queue])
4246                 set_ic = true;
4247         else if ((tx_q->tx_count_frames %
4248                   priv->tx_coal_frames[queue]) < tx_packets)
4249                 set_ic = true;
4250         else
4251                 set_ic = false;
4252
4253         if (set_ic) {
4254                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4255                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4256                 else
4257                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4258
4259                 tx_q->tx_count_frames = 0;
4260                 stmmac_set_tx_ic(priv, desc);
4261                 priv->xstats.tx_set_ic_bit++;
4262         }
4263
4264         /* We've used all descriptors we need for this skb, however,
4265          * advance cur_tx so that it references a fresh descriptor.
4266          * ndo_start_xmit will fill this descriptor the next time it's
4267          * called and stmmac_tx_clean may clean up to this descriptor.
4268          */
4269         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4270
4271         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4272                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4273                           __func__);
4274                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4275         }
4276
4277         dev->stats.tx_bytes += skb->len;
4278         priv->xstats.tx_tso_frames++;
4279         priv->xstats.tx_tso_nfrags += nfrags;
4280
4281         if (priv->sarc_type)
4282                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4283
4284         skb_tx_timestamp(skb);
4285
4286         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4287                      priv->hwts_tx_en)) {
4288                 /* declare that device is doing timestamping */
4289                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4290                 stmmac_enable_tx_timestamp(priv, first);
4291         }
4292
4293         /* Complete the first descriptor before granting the DMA */
4294         stmmac_prepare_tso_tx_desc(priv, first, 1,
4295                         proto_hdr_len,
4296                         pay_len,
4297                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4298                         hdr / 4, (skb->len - proto_hdr_len));
4299
4300         /* If context desc is used to change MSS */
4301         if (mss_desc) {
4302                 /* Make sure that the first descriptor has been completely
4303                  * written, including its OWN bit. The MSS descriptor
4304                  * actually precedes the first descriptor, so we need to
4305                  * make sure that its OWN bit is the last thing written.
4306                  */
4307                 dma_wmb();
4308                 stmmac_set_tx_owner(priv, mss_desc);
4309         }
4310
4311         if (netif_msg_pktdata(priv)) {
4312                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4313                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4314                         tx_q->cur_tx, first, nfrags);
4315                 pr_info(">>> frame to be transmitted: ");
4316                 print_pkt(skb->data, skb_headlen(skb));
4317         }
4318
4319         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4320
4321         stmmac_flush_tx_descriptors(priv, queue);
4322         stmmac_tx_timer_arm(priv, queue);
4323
4324         return NETDEV_TX_OK;
4325
4326 dma_map_err:
4327         dev_err(priv->device, "Tx dma map failed\n");
4328         dev_kfree_skb(skb);
4329         priv->dev->stats.tx_dropped++;
4330         return NETDEV_TX_OK;
4331 }
4332
4333 /**
4334  *  stmmac_xmit - Tx entry point of the driver
4335  *  @skb : the socket buffer
4336  *  @dev : device pointer
4337  *  Description : this is the tx entry point of the driver.
4338  *  It programs the chain or the ring and supports oversized frames
4339  *  and SG feature.
4340  */
4341 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4342 {
4343         unsigned int first_entry, tx_packets, enh_desc;
4344         struct stmmac_priv *priv = netdev_priv(dev);
4345         unsigned int nopaged_len = skb_headlen(skb);
4346         int i, csum_insertion = 0, is_jumbo = 0;
4347         u32 queue = skb_get_queue_mapping(skb);
4348         int nfrags = skb_shinfo(skb)->nr_frags;
4349         int gso = skb_shinfo(skb)->gso_type;
4350         struct dma_edesc *tbs_desc = NULL;
4351         struct dma_desc *desc, *first;
4352         struct stmmac_tx_queue *tx_q;
4353         bool has_vlan, set_ic;
4354         int entry, first_tx;
4355         dma_addr_t des;
4356
4357         tx_q = &priv->dma_conf.tx_queue[queue];
4358         first_tx = tx_q->cur_tx;
4359
4360         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4361                 stmmac_disable_eee_mode(priv);
4362
4363         /* Manage oversized TCP frames (and UDP GSO frames on GMAC4) via the TSO path */
4364         if (skb_is_gso(skb) && priv->tso) {
4365                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4366                         return stmmac_tso_xmit(skb, dev);
4367                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4368                         return stmmac_tso_xmit(skb, dev);
4369         }
4370
4371         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4372                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4373                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4374                                                                 queue));
4375                         /* This is a hard error, log it. */
4376                         netdev_err(priv->dev,
4377                                    "%s: Tx Ring full when queue awake\n",
4378                                    __func__);
4379                 }
4380                 return NETDEV_TX_BUSY;
4381         }
4382
4383         /* Check if VLAN can be inserted by HW */
4384         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4385
4386         entry = tx_q->cur_tx;
4387         first_entry = entry;
4388         WARN_ON(tx_q->tx_skbuff[first_entry]);
4389
4390         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4391
4392         if (likely(priv->extend_desc))
4393                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4394         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4395                 desc = &tx_q->dma_entx[entry].basic;
4396         else
4397                 desc = tx_q->dma_tx + entry;
4398
4399         first = desc;
4400
4401         if (has_vlan)
4402                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4403
4404         enh_desc = priv->plat->enh_desc;
4405         /* To program the descriptors according to the size of the frame */
4406         if (enh_desc)
4407                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4408
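             /* Let the mode-specific helper lay out the jumbo frame; it
              * returns the last entry used or a negative error code.
              */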
4409         if (unlikely(is_jumbo)) {
4410                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4411                 if (unlikely(entry < 0) && (entry != -EINVAL))
4412                         goto dma_map_err;
4413         }
4414
4415         for (i = 0; i < nfrags; i++) {
4416                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4417                 int len = skb_frag_size(frag);
4418                 bool last_segment = (i == (nfrags - 1));
4419
4420                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4421                 WARN_ON(tx_q->tx_skbuff[entry]);
4422
4423                 if (likely(priv->extend_desc))
4424                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4425                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4426                         desc = &tx_q->dma_entx[entry].basic;
4427                 else
4428                         desc = tx_q->dma_tx + entry;
4429
4430                 des = skb_frag_dma_map(priv->device, frag, 0, len,
4431                                        DMA_TO_DEVICE);
4432                 if (dma_mapping_error(priv->device, des))
4433                         goto dma_map_err; /* should reuse desc w/o issues */
4434
4435                 tx_q->tx_skbuff_dma[entry].buf = des;
4436
4437                 stmmac_set_desc_addr(priv, desc, des);
4438
4439                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4440                 tx_q->tx_skbuff_dma[entry].len = len;
4441                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4442                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4443
4444                 /* Prepare the descriptor and set the own bit too */
4445                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4446                                 priv->mode, 1, last_segment, skb->len);
4447         }
4448
4449         /* Only the last descriptor gets to point to the skb. */
4450         tx_q->tx_skbuff[entry] = skb;
4451         tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4452
4453         /* According to the coalesce parameter, the IC bit of the latest
4454          * segment may be left clear and the timer re-armed to clean the tx
4455          * status later. This approach also takes care of the fragments:
4456          * with no SG, desc is the first element.
4457          */
4458         tx_packets = (entry + 1) - first_tx;
4459         tx_q->tx_count_frames += tx_packets;
4460
4461         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4462                 set_ic = true;
4463         else if (!priv->tx_coal_frames[queue])
4464                 set_ic = false;
4465         else if (tx_packets > priv->tx_coal_frames[queue])
4466                 set_ic = true;
4467         else if ((tx_q->tx_count_frames %
4468                   priv->tx_coal_frames[queue]) < tx_packets)
4469                 set_ic = true;
4470         else
4471                 set_ic = false;
4472
4473         if (set_ic) {
4474                 if (likely(priv->extend_desc))
4475                         desc = &tx_q->dma_etx[entry].basic;
4476                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4477                         desc = &tx_q->dma_entx[entry].basic;
4478                 else
4479                         desc = &tx_q->dma_tx[entry];
4480
4481                 tx_q->tx_count_frames = 0;
4482                 stmmac_set_tx_ic(priv, desc);
4483                 priv->xstats.tx_set_ic_bit++;
4484         }
4485
4486         /* We've used all descriptors we need for this skb, however,
4487          * advance cur_tx so that it references a fresh descriptor.
4488          * ndo_start_xmit will fill this descriptor the next time it's
4489          * called and stmmac_tx_clean may clean up to this descriptor.
4490          */
4491         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4492         tx_q->cur_tx = entry;
4493
4494         if (netif_msg_pktdata(priv)) {
4495                 netdev_dbg(priv->dev,
4496                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4497                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4498                            entry, first, nfrags);
4499
4500                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4501                 print_pkt(skb->data, skb->len);
4502         }
4503
4504         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4505                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4506                           __func__);
4507                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4508         }
4509
4510         dev->stats.tx_bytes += skb->len;
4511
4512         if (priv->sarc_type)
4513                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4514
4515         skb_tx_timestamp(skb);
4516
4517         /* Ready to fill the first descriptor and set the OWN bit w/o any
4518          * problems because all the descriptors are actually ready to be
4519          * passed to the DMA engine.
4520          */
4521         if (likely(!is_jumbo)) {
4522                 bool last_segment = (nfrags == 0);
4523
4524                 des = dma_map_single(priv->device, skb->data,
4525                                      nopaged_len, DMA_TO_DEVICE);
4526                 if (dma_mapping_error(priv->device, des))
4527                         goto dma_map_err;
4528
4529                 tx_q->tx_skbuff_dma[first_entry].buf = des;
4530                 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4531                 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4532
4533                 stmmac_set_desc_addr(priv, first, des);
4534
4535                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4536                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4537
4538                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4539                              priv->hwts_tx_en)) {
4540                         /* declare that device is doing timestamping */
4541                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4542                         stmmac_enable_tx_timestamp(priv, first);
4543                 }
4544
4545                 /* Prepare the first descriptor setting the OWN bit too */
4546                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4547                                 csum_insertion, priv->mode, 0, last_segment,
4548                                 skb->len);
4549         }
4550
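             /* With Time Based Scheduling enabled, program the frame's launch
              * time (taken from skb->tstamp) into the enhanced descriptor.
              */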
4551         if (tx_q->tbs & STMMAC_TBS_EN) {
4552                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4553
4554                 tbs_desc = &tx_q->dma_entx[first_entry];
4555                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4556         }
4557
4558         stmmac_set_tx_owner(priv, first);
4559
4560         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4561
4562         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4563
4564         stmmac_flush_tx_descriptors(priv, queue);
4565         stmmac_tx_timer_arm(priv, queue);
4566
4567         return NETDEV_TX_OK;
4568
4569 dma_map_err:
4570         netdev_err(priv->dev, "Tx DMA map failed\n");
4571         dev_kfree_skb(skb);
4572         priv->dev->stats.tx_dropped++;
4573         return NETDEV_TX_OK;
4574 }
4575
4576 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4577 {
4578         struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4579         __be16 vlan_proto = veth->h_vlan_proto;
4580         u16 vlanid;
4581
4582         if ((vlan_proto == htons(ETH_P_8021Q) &&
4583              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4584             (vlan_proto == htons(ETH_P_8021AD) &&
4585              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4586                 /* pop the vlan tag */
4587                 vlanid = ntohs(veth->h_vlan_TCI);
4588                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4589                 skb_pull(skb, VLAN_HLEN);
4590                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4591         }
4592 }
4593
4594 /**
4595  * stmmac_rx_refill - refill used skb preallocated buffers
4596  * @priv: driver private structure
4597  * @queue: RX queue index
4598  * Description : this is to reallocate the RX buffers for the reception
4599  * process, which is based on zero-copy.
4600  */
4601 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4602 {
4603         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4604         int dirty = stmmac_rx_dirty(priv, queue);
4605         unsigned int entry = rx_q->dirty_rx;
4606         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4607
4608         if (priv->dma_cap.host_dma_width <= 32)
4609                 gfp |= GFP_DMA32;
4610
4611         while (dirty-- > 0) {
4612                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4613                 struct dma_desc *p;
4614                 bool use_rx_wd;
4615
4616                 if (priv->extend_desc)
4617                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4618                 else
4619                         p = rx_q->dma_rx + entry;
4620
4621                 if (!buf->page) {
4622                         buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4623                         if (!buf->page)
4624                                 break;
4625                 }
4626
4627                 if (priv->sph && !buf->sec_page) {
4628                         buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4629                         if (!buf->sec_page)
4630                                 break;
4631
4632                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4633                 }
4634
4635                 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4636
4637                 stmmac_set_desc_addr(priv, p, buf->addr);
4638                 if (priv->sph)
4639                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4640                 else
4641                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4642                 stmmac_refill_desc3(priv, rx_q, p);
4643
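                     /* Frame-based RX coalescing: decide whether this
                      * descriptor should rely on the RX watchdog (use_rx_wd)
                      * or raise a completion interrupt; the watchdog is only
                      * used when RIWT is enabled.
                      */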
4644                 rx_q->rx_count_frames++;
4645                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4646                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4647                         rx_q->rx_count_frames = 0;
4648
4649                 use_rx_wd = !priv->rx_coal_frames[queue];
4650                 use_rx_wd |= rx_q->rx_count_frames > 0;
4651                 if (!priv->use_riwt)
4652                         use_rx_wd = false;
4653
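                     /* Make sure all descriptor fields are written before
                      * ownership is handed back to the DMA.
                      */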
4654                 dma_wmb();
4655                 stmmac_set_rx_owner(priv, p, use_rx_wd);
4656
4657                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4658         }
4659         rx_q->dirty_rx = entry;
4660         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4661                             (rx_q->dirty_rx * sizeof(struct dma_desc));
4662         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4663 }
4664
4665 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4666                                        struct dma_desc *p,
4667                                        int status, unsigned int len)
4668 {
4669         unsigned int plen = 0, hlen = 0;
4670         int coe = priv->hw->rx_csum;
4671
4672         /* Not first descriptor, buffer is always zero */
4673         if (priv->sph && len)
4674                 return 0;
4675
4676         /* First descriptor, get split header length */
4677         stmmac_get_rx_header_len(priv, p, &hlen);
4678         if (priv->sph && hlen) {
4679                 priv->xstats.rx_split_hdr_pkt_n++;
4680                 return hlen;
4681         }
4682
4683         /* First descriptor, not last descriptor and not split header */
4684         if (status & rx_not_ls)
4685                 return priv->dma_conf.dma_buf_sz;
4686
4687         plen = stmmac_get_rx_frame_len(priv, p, coe);
4688
4689         /* First descriptor and last descriptor and not split header */
4690         return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4691 }
4692
4693 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4694                                        struct dma_desc *p,
4695                                        int status, unsigned int len)
4696 {
4697         int coe = priv->hw->rx_csum;
4698         unsigned int plen = 0;
4699
4700         /* Not split header, buffer is not available */
4701         if (!priv->sph)
4702                 return 0;
4703
4704         /* Not last descriptor */
4705         if (status & rx_not_ls)
4706                 return priv->dma_conf.dma_buf_sz;
4707
4708         plen = stmmac_get_rx_frame_len(priv, p, coe);
4709
4710         /* Last descriptor */
4711         return plen - len;
4712 }
4713
4714 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4715                                 struct xdp_frame *xdpf, bool dma_map)
4716 {
4717         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4718         unsigned int entry = tx_q->cur_tx;
4719         struct dma_desc *tx_desc;
4720         dma_addr_t dma_addr;
4721         bool set_ic;
4722
4723         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4724                 return STMMAC_XDP_CONSUMED;
4725
4726         if (likely(priv->extend_desc))
4727                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4728         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4729                 tx_desc = &tx_q->dma_entx[entry].basic;
4730         else
4731                 tx_desc = tx_q->dma_tx + entry;
4732
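             /* The ndo_xdp_xmit path (dma_map) maps a fresh buffer; the
              * XDP_TX path reuses the page_pool page, which is already DMA
              * mapped, and only needs a sync.
              */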
4733         if (dma_map) {
4734                 dma_addr = dma_map_single(priv->device, xdpf->data,
4735                                           xdpf->len, DMA_TO_DEVICE);
4736                 if (dma_mapping_error(priv->device, dma_addr))
4737                         return STMMAC_XDP_CONSUMED;
4738
4739                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4740         } else {
4741                 struct page *page = virt_to_page(xdpf->data);
4742
4743                 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4744                            xdpf->headroom;
4745                 dma_sync_single_for_device(priv->device, dma_addr,
4746                                            xdpf->len, DMA_BIDIRECTIONAL);
4747
4748                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4749         }
4750
4751         tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4752         tx_q->tx_skbuff_dma[entry].map_as_page = false;
4753         tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4754         tx_q->tx_skbuff_dma[entry].last_segment = true;
4755         tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4756
4757         tx_q->xdpf[entry] = xdpf;
4758
4759         stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4760
4761         stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4762                                true, priv->mode, true, true,
4763                                xdpf->len);
4764
4765         tx_q->tx_count_frames++;
4766
4767         if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4768                 set_ic = true;
4769         else
4770                 set_ic = false;
4771
4772         if (set_ic) {
4773                 tx_q->tx_count_frames = 0;
4774                 stmmac_set_tx_ic(priv, tx_desc);
4775                 priv->xstats.tx_set_ic_bit++;
4776         }
4777
4778         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4779
4780         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4781         tx_q->cur_tx = entry;
4782
4783         return STMMAC_XDP_TX;
4784 }
4785
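     /* Pick a TX queue for XDP transmission by folding the CPU id into the
      * range of configured TX queues.
      */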
4786 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4787                                    int cpu)
4788 {
4789         int index = cpu;
4790
4791         if (unlikely(index < 0))
4792                 index = 0;
4793
4794         while (index >= priv->plat->tx_queues_to_use)
4795                 index -= priv->plat->tx_queues_to_use;
4796
4797         return index;
4798 }
4799
4800 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4801                                 struct xdp_buff *xdp)
4802 {
4803         struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4804         int cpu = smp_processor_id();
4805         struct netdev_queue *nq;
4806         int queue;
4807         int res;
4808
4809         if (unlikely(!xdpf))
4810                 return STMMAC_XDP_CONSUMED;
4811
4812         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4813         nq = netdev_get_tx_queue(priv->dev, queue);
4814
4815         __netif_tx_lock(nq, cpu);
4816         /* Avoids TX time-out as we are sharing with slow path */
4817         txq_trans_cond_update(nq);
4818
4819         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4820         if (res == STMMAC_XDP_TX)
4821                 stmmac_flush_tx_descriptors(priv, queue);
4822
4823         __netif_tx_unlock(nq);
4824
4825         return res;
4826 }
4827
4828 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4829                                  struct bpf_prog *prog,
4830                                  struct xdp_buff *xdp)
4831 {
4832         u32 act;
4833         int res;
4834
4835         act = bpf_prog_run_xdp(prog, xdp);
4836         switch (act) {
4837         case XDP_PASS:
4838                 res = STMMAC_XDP_PASS;
4839                 break;
4840         case XDP_TX:
4841                 res = stmmac_xdp_xmit_back(priv, xdp);
4842                 break;
4843         case XDP_REDIRECT:
4844                 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4845                         res = STMMAC_XDP_CONSUMED;
4846                 else
4847                         res = STMMAC_XDP_REDIRECT;
4848                 break;
4849         default:
4850                 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4851                 fallthrough;
4852         case XDP_ABORTED:
4853                 trace_xdp_exception(priv->dev, prog, act);
4854                 fallthrough;
4855         case XDP_DROP:
4856                 res = STMMAC_XDP_CONSUMED;
4857                 break;
4858         }
4859
4860         return res;
4861 }
4862
4863 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4864                                            struct xdp_buff *xdp)
4865 {
4866         struct bpf_prog *prog;
4867         int res;
4868
4869         prog = READ_ONCE(priv->xdp_prog);
4870         if (!prog) {
4871                 res = STMMAC_XDP_PASS;
4872                 goto out;
4873         }
4874
4875         res = __stmmac_xdp_run_prog(priv, prog, xdp);
4876 out:
4877         return ERR_PTR(-res);
4878 }
4879
4880 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4881                                    int xdp_status)
4882 {
4883         int cpu = smp_processor_id();
4884         int queue;
4885
4886         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4887
4888         if (xdp_status & STMMAC_XDP_TX)
4889                 stmmac_tx_timer_arm(priv, queue);
4890
4891         if (xdp_status & STMMAC_XDP_REDIRECT)
4892                 xdp_do_flush();
4893 }
4894
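     /* Build an skb from an XSK (zero-copy) RX buffer: the payload is copied
      * into a freshly allocated skb so the XSK buffer can be returned to the
      * pool.
      */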
4895 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4896                                                struct xdp_buff *xdp)
4897 {
4898         unsigned int metasize = xdp->data - xdp->data_meta;
4899         unsigned int datasize = xdp->data_end - xdp->data;
4900         struct sk_buff *skb;
4901
4902         skb = __napi_alloc_skb(&ch->rxtx_napi,
4903                                xdp->data_end - xdp->data_hard_start,
4904                                GFP_ATOMIC | __GFP_NOWARN);
4905         if (unlikely(!skb))
4906                 return NULL;
4907
4908         skb_reserve(skb, xdp->data - xdp->data_hard_start);
4909         memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4910         if (metasize)
4911                 skb_metadata_set(skb, metasize);
4912
4913         return skb;
4914 }
4915
4916 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4917                                    struct dma_desc *p, struct dma_desc *np,
4918                                    struct xdp_buff *xdp)
4919 {
4920         struct stmmac_channel *ch = &priv->channel[queue];
4921         unsigned int len = xdp->data_end - xdp->data;
4922         enum pkt_hash_types hash_type;
4923         int coe = priv->hw->rx_csum;
4924         struct sk_buff *skb;
4925         u32 hash;
4926
4927         skb = stmmac_construct_skb_zc(ch, xdp);
4928         if (!skb) {
4929                 priv->dev->stats.rx_dropped++;
4930                 return;
4931         }
4932
4933         stmmac_get_rx_hwtstamp(priv, p, np, skb);
4934         stmmac_rx_vlan(priv->dev, skb);
4935         skb->protocol = eth_type_trans(skb, priv->dev);
4936
4937         if (unlikely(!coe))
4938                 skb_checksum_none_assert(skb);
4939         else
4940                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4941
4942         if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4943                 skb_set_hash(skb, hash, hash_type);
4944
4945         skb_record_rx_queue(skb, queue);
4946         napi_gro_receive(&ch->rxtx_napi, skb);
4947
4948         priv->dev->stats.rx_packets++;
4949         priv->dev->stats.rx_bytes += len;
4950 }
4951
4952 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4953 {
4954         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4955         unsigned int entry = rx_q->dirty_rx;
4956         struct dma_desc *rx_desc = NULL;
4957         bool ret = true;
4958
4959         budget = min(budget, stmmac_rx_dirty(priv, queue));
4960
4961         while (budget-- > 0 && entry != rx_q->cur_rx) {
4962                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4963                 dma_addr_t dma_addr;
4964                 bool use_rx_wd;
4965
4966                 if (!buf->xdp) {
4967                         buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4968                         if (!buf->xdp) {
4969                                 ret = false;
4970                                 break;
4971                         }
4972                 }
4973
4974                 if (priv->extend_desc)
4975                         rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4976                 else
4977                         rx_desc = rx_q->dma_rx + entry;
4978
4979                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4980                 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4981                 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4982                 stmmac_refill_desc3(priv, rx_q, rx_desc);
4983
4984                 rx_q->rx_count_frames++;
4985                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4986                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4987                         rx_q->rx_count_frames = 0;
4988
4989                 use_rx_wd = !priv->rx_coal_frames[queue];
4990                 use_rx_wd |= rx_q->rx_count_frames > 0;
4991                 if (!priv->use_riwt)
4992                         use_rx_wd = false;
4993
4994                 dma_wmb();
4995                 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4996
4997                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4998         }
4999
5000         if (rx_desc) {
5001                 rx_q->dirty_rx = entry;
5002                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5003                                      (rx_q->dirty_rx * sizeof(struct dma_desc));
5004                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5005         }
5006
5007         return ret;
5008 }
5009
5010 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5011 {
5012         /* In the XDP zero-copy data path, the xdp field in struct
5013          * xdp_buff_xsk is used to represent the incoming packet, whereas
5014          * the cb field in the same structure is used to store driver
5015          * specific info. Thus, struct stmmac_xdp_buff is laid on top of
5016          * the xdp and cb fields of struct xdp_buff_xsk.
5017          */
5017         return (struct stmmac_xdp_buff *)xdp;
5018 }
5019
5020 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5021 {
5022         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5023         unsigned int count = 0, error = 0, len = 0;
5024         int dirty = stmmac_rx_dirty(priv, queue);
5025         unsigned int next_entry = rx_q->cur_rx;
5026         unsigned int desc_size;
5027         struct bpf_prog *prog;
5028         bool failure = false;
5029         int xdp_status = 0;
5030         int status = 0;
5031
5032         if (netif_msg_rx_status(priv)) {
5033                 void *rx_head;
5034
5035                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5036                 if (priv->extend_desc) {
5037                         rx_head = (void *)rx_q->dma_erx;
5038                         desc_size = sizeof(struct dma_extended_desc);
5039                 } else {
5040                         rx_head = (void *)rx_q->dma_rx;
5041                         desc_size = sizeof(struct dma_desc);
5042                 }
5043
5044                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5045                                     rx_q->dma_rx_phy, desc_size);
5046         }
5047         while (count < limit) {
5048                 struct stmmac_rx_buffer *buf;
5049                 struct stmmac_xdp_buff *ctx;
5050                 unsigned int buf1_len = 0;
5051                 struct dma_desc *np, *p;
5052                 int entry;
5053                 int res;
5054
5055                 if (!count && rx_q->state_saved) {
5056                         error = rx_q->state.error;
5057                         len = rx_q->state.len;
5058                 } else {
5059                         rx_q->state_saved = false;
5060                         error = 0;
5061                         len = 0;
5062                 }
5063
5064                 if (count >= limit)
5065                         break;
5066
5067 read_again:
5068                 buf1_len = 0;
5069                 entry = next_entry;
5070                 buf = &rx_q->buf_pool[entry];
5071
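                     /* Refill the RX ring in batches of at least
                      * STMMAC_RX_FILL_BATCH used buffers.
                      */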
5072                 if (dirty >= STMMAC_RX_FILL_BATCH) {
5073                         failure = failure ||
5074                                   !stmmac_rx_refill_zc(priv, queue, dirty);
5075                         dirty = 0;
5076                 }
5077
5078                 if (priv->extend_desc)
5079                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5080                 else
5081                         p = rx_q->dma_rx + entry;
5082
5083                 /* read the status of the incoming frame */
5084                 status = stmmac_rx_status(priv, &priv->dev->stats,
5085                                           &priv->xstats, p);
5086                 /* check if managed by the DMA otherwise go ahead */
5087                 if (unlikely(status & dma_own))
5088                         break;
5089
5090                 /* Prefetch the next RX descriptor */
5091                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5092                                                 priv->dma_conf.dma_rx_size);
5093                 next_entry = rx_q->cur_rx;
5094
5095                 if (priv->extend_desc)
5096                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5097                 else
5098                         np = rx_q->dma_rx + next_entry;
5099
5100                 prefetch(np);
5101
5102                 /* Ensure a valid XSK buffer before proceeding */
5103                 if (!buf->xdp)
5104                         break;
5105
5106                 if (priv->extend_desc)
5107                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5108                                                   &priv->xstats,
5109                                                   rx_q->dma_erx + entry);
5110                 if (unlikely(status == discard_frame)) {
5111                         xsk_buff_free(buf->xdp);
5112                         buf->xdp = NULL;
5113                         dirty++;
5114                         error = 1;
5115                         if (!priv->hwts_rx_en)
5116                                 priv->dev->stats.rx_errors++;
5117                 }
5118
5119                 if (unlikely(error && (status & rx_not_ls)))
5120                         goto read_again;
5121                 if (unlikely(error)) {
5122                         count++;
5123                         continue;
5124                 }
5125
5126                 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5127                 if (likely(status & rx_not_ls)) {
5128                         xsk_buff_free(buf->xdp);
5129                         buf->xdp = NULL;
5130                         dirty++;
5131                         count++;
5132                         goto read_again;
5133                 }
5134
5135                 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5136                 ctx->priv = priv;
5137                 ctx->desc = p;
5138                 ctx->ndesc = np;
5139
5140                 /* XDP ZC frames only support the primary buffer for now */
5141                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5142                 len += buf1_len;
5143
5144                 /* ACS is disabled; strip manually. */
5145                 if (likely(!(status & rx_not_ls))) {
5146                         buf1_len -= ETH_FCS_LEN;
5147                         len -= ETH_FCS_LEN;
5148                 }
5149
5150                 /* RX buffer is good and fit into a XSK pool buffer */
5151                 buf->xdp->data_end = buf->xdp->data + buf1_len;
5152                 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5153
5154                 prog = READ_ONCE(priv->xdp_prog);
5155                 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5156
5157                 switch (res) {
5158                 case STMMAC_XDP_PASS:
5159                         stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5160                         xsk_buff_free(buf->xdp);
5161                         break;
5162                 case STMMAC_XDP_CONSUMED:
5163                         xsk_buff_free(buf->xdp);
5164                         priv->dev->stats.rx_dropped++;
5165                         break;
5166                 case STMMAC_XDP_TX:
5167                 case STMMAC_XDP_REDIRECT:
5168                         xdp_status |= res;
5169                         break;
5170                 }
5171
5172                 buf->xdp = NULL;
5173                 dirty++;
5174                 count++;
5175         }
5176
5177         if (status & rx_not_ls) {
5178                 rx_q->state_saved = true;
5179                 rx_q->state.error = error;
5180                 rx_q->state.len = len;
5181         }
5182
5183         stmmac_finalize_xdp_rx(priv, xdp_status);
5184
5185         priv->xstats.rx_pkt_n += count;
5186         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5187
5188         if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5189                 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5190                         xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5191                 else
5192                         xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5193
5194                 return (int)count;
5195         }
5196
5197         return failure ? limit : (int)count;
5198 }
5199
5200 /**
5201  * stmmac_rx - manage the receive process
5202  * @priv: driver private structure
5203  * @limit: napi budget
5204  * @queue: RX queue index.
5205  * Description : this is the function called by the napi poll method.
5206  * It gets all the frames inside the ring.
5207  */
5208 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5209 {
5210         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5211         struct stmmac_channel *ch = &priv->channel[queue];
5212         unsigned int count = 0, error = 0, len = 0;
5213         int status = 0, coe = priv->hw->rx_csum;
5214         unsigned int next_entry = rx_q->cur_rx;
5215         enum dma_data_direction dma_dir;
5216         unsigned int desc_size;
5217         struct sk_buff *skb = NULL;
5218         struct stmmac_xdp_buff ctx;
5219         int xdp_status = 0;
5220         int buf_sz;
5221
5222         dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5223         buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5224
5225         if (netif_msg_rx_status(priv)) {
5226                 void *rx_head;
5227
5228                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5229                 if (priv->extend_desc) {
5230                         rx_head = (void *)rx_q->dma_erx;
5231                         desc_size = sizeof(struct dma_extended_desc);
5232                 } else {
5233                         rx_head = (void *)rx_q->dma_rx;
5234                         desc_size = sizeof(struct dma_desc);
5235                 }
5236
5237                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5238                                     rx_q->dma_rx_phy, desc_size);
5239         }
5240         while (count < limit) {
5241                 unsigned int buf1_len = 0, buf2_len = 0;
5242                 enum pkt_hash_types hash_type;
5243                 struct stmmac_rx_buffer *buf;
5244                 struct dma_desc *np, *p;
5245                 int entry;
5246                 u32 hash;
5247
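                     /* Resume a frame left half-assembled by a previous NAPI
                      * run (see the state save at the end of this function).
                      */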
5248                 if (!count && rx_q->state_saved) {
5249                         skb = rx_q->state.skb;
5250                         error = rx_q->state.error;
5251                         len = rx_q->state.len;
5252                 } else {
5253                         rx_q->state_saved = false;
5254                         skb = NULL;
5255                         error = 0;
5256                         len = 0;
5257                 }
5258
5259                 if (count >= limit)
5260                         break;
5261
5262 read_again:
5263                 buf1_len = 0;
5264                 buf2_len = 0;
5265                 entry = next_entry;
5266                 buf = &rx_q->buf_pool[entry];
5267
5268                 if (priv->extend_desc)
5269                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5270                 else
5271                         p = rx_q->dma_rx + entry;
5272
5273                 /* read the status of the incoming frame */
5274                 status = stmmac_rx_status(priv, &priv->dev->stats,
5275                                 &priv->xstats, p);
5276                 /* check if managed by the DMA otherwise go ahead */
5277                 if (unlikely(status & dma_own))
5278                         break;
5279
5280                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5281                                                 priv->dma_conf.dma_rx_size);
5282                 next_entry = rx_q->cur_rx;
5283
5284                 if (priv->extend_desc)
5285                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5286                 else
5287                         np = rx_q->dma_rx + next_entry;
5288
5289                 prefetch(np);
5290
5291                 if (priv->extend_desc)
5292                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5293                                         &priv->xstats, rx_q->dma_erx + entry);
5294                 if (unlikely(status == discard_frame)) {
5295                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5296                         buf->page = NULL;
5297                         error = 1;
5298                         if (!priv->hwts_rx_en)
5299                                 priv->dev->stats.rx_errors++;
5300                 }
5301
5302                 if (unlikely(error && (status & rx_not_ls)))
5303                         goto read_again;
5304                 if (unlikely(error)) {
5305                         dev_kfree_skb(skb);
5306                         skb = NULL;
5307                         count++;
5308                         continue;
5309                 }
5310
5311                 /* Buffer is good. Go on. */
5312
5313                 prefetch(page_address(buf->page) + buf->page_offset);
5314                 if (buf->sec_page)
5315                         prefetch(page_address(buf->sec_page));
5316
5317                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5318                 len += buf1_len;
5319                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5320                 len += buf2_len;
5321
5322                 /* ACS is disabled; strip manually. */
5323                 if (likely(!(status & rx_not_ls))) {
5324                         if (buf2_len) {
5325                                 buf2_len -= ETH_FCS_LEN;
5326                                 len -= ETH_FCS_LEN;
5327                         } else if (buf1_len) {
5328                                 buf1_len -= ETH_FCS_LEN;
5329                                 len -= ETH_FCS_LEN;
5330                         }
5331                 }
5332
5333                 if (!skb) {
5334                         unsigned int pre_len, sync_len;
5335
5336                         dma_sync_single_for_cpu(priv->device, buf->addr,
5337                                                 buf1_len, dma_dir);
5338
5339                         xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5340                         xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5341                                          buf->page_offset, buf1_len, true);
5342
5343                         pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5344                                   buf->page_offset;
5345
5346                         ctx.priv = priv;
5347                         ctx.desc = p;
5348                         ctx.ndesc = np;
5349
5350                         skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5351                         /* Because of xdp_adjust_tail, the for_device DMA
5352                          * sync must cover the maximum length the CPU touched
5353                          */
5354                         sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5355                                    buf->page_offset;
5356                         sync_len = max(sync_len, pre_len);
5357
5358                         /* Handle verdicts other than XDP_PASS */
5359                         if (IS_ERR(skb)) {
5360                                 unsigned int xdp_res = -PTR_ERR(skb);
5361
5362                                 if (xdp_res & STMMAC_XDP_CONSUMED) {
5363                                         page_pool_put_page(rx_q->page_pool,
5364                                                            virt_to_head_page(ctx.xdp.data),
5365                                                            sync_len, true);
5366                                         buf->page = NULL;
5367                                         priv->dev->stats.rx_dropped++;
5368
5369                                         /* Clear skb as it was set as
5370                                          * status by XDP program.
5371                                          */
5372                                         skb = NULL;
5373
5374                                         if (unlikely((status & rx_not_ls)))
5375                                                 goto read_again;
5376
5377                                         count++;
5378                                         continue;
5379                                 } else if (xdp_res & (STMMAC_XDP_TX |
5380                                                       STMMAC_XDP_REDIRECT)) {
5381                                         xdp_status |= xdp_res;
5382                                         buf->page = NULL;
5383                                         skb = NULL;
5384                                         count++;
5385                                         continue;
5386                                 }
5387                         }
5388                 }
5389
5390                 if (!skb) {
5391                         /* XDP program may expand or reduce tail */
5392                         buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5393
5394                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5395                         if (!skb) {
5396                                 priv->dev->stats.rx_dropped++;
5397                                 count++;
5398                                 goto drain_data;
5399                         }
5400
5401                         /* XDP program may adjust header */
5402                         skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5403                         skb_put(skb, buf1_len);
5404
5405                         /* Data payload copied into SKB, page ready for recycle */
5406                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5407                         buf->page = NULL;
5408                 } else if (buf1_len) {
5409                         dma_sync_single_for_cpu(priv->device, buf->addr,
5410                                                 buf1_len, dma_dir);
5411                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5412                                         buf->page, buf->page_offset, buf1_len,
5413                                         priv->dma_conf.dma_buf_sz);
5414
5415                         /* Data payload appended into SKB */
5416                         page_pool_release_page(rx_q->page_pool, buf->page);
5417                         buf->page = NULL;
5418                 }
5419
5420                 if (buf2_len) {
5421                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5422                                                 buf2_len, dma_dir);
5423                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5424                                         buf->sec_page, 0, buf2_len,
5425                                         priv->dma_conf.dma_buf_sz);
5426
5427                         /* Data payload appended into SKB */
5428                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
5429                         buf->sec_page = NULL;
5430                 }
5431
5432 drain_data:
5433                 if (likely(status & rx_not_ls))
5434                         goto read_again;
5435                 if (!skb)
5436                         continue;
5437
5438                 /* Got entire packet into SKB. Finish it. */
5439
5440                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5441                 stmmac_rx_vlan(priv->dev, skb);
5442                 skb->protocol = eth_type_trans(skb, priv->dev);
5443
5444                 if (unlikely(!coe))
5445                         skb_checksum_none_assert(skb);
5446                 else
5447                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5448
5449                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5450                         skb_set_hash(skb, hash, hash_type);
5451
5452                 skb_record_rx_queue(skb, queue);
5453                 napi_gro_receive(&ch->rx_napi, skb);
5454                 skb = NULL;
5455
5456                 priv->dev->stats.rx_packets++;
5457                 priv->dev->stats.rx_bytes += len;
5458                 count++;
5459         }
5460
5461         if (status & rx_not_ls || skb) {
5462                 rx_q->state_saved = true;
5463                 rx_q->state.skb = skb;
5464                 rx_q->state.error = error;
5465                 rx_q->state.len = len;
5466         }
5467
5468         stmmac_finalize_xdp_rx(priv, xdp_status);
5469
5470         stmmac_rx_refill(priv, queue);
5471
5472         priv->xstats.rx_pkt_n += count;
5473         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5474
5475         return count;
5476 }
5477
5478 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5479 {
5480         struct stmmac_channel *ch =
5481                 container_of(napi, struct stmmac_channel, rx_napi);
5482         struct stmmac_priv *priv = ch->priv_data;
5483         u32 chan = ch->index;
5484         int work_done;
5485
5486         priv->xstats.napi_poll++;
5487
5488         work_done = stmmac_rx(priv, budget, chan);
5489         if (work_done < budget && napi_complete_done(napi, work_done)) {
5490                 unsigned long flags;
5491
5492                 spin_lock_irqsave(&ch->lock, flags);
5493                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5494                 spin_unlock_irqrestore(&ch->lock, flags);
5495         }
5496
5497         return work_done;
5498 }
5499
5500 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5501 {
5502         struct stmmac_channel *ch =
5503                 container_of(napi, struct stmmac_channel, tx_napi);
5504         struct stmmac_priv *priv = ch->priv_data;
5505         u32 chan = ch->index;
5506         int work_done;
5507
5508         priv->xstats.napi_poll++;
5509
5510         work_done = stmmac_tx_clean(priv, budget, chan);
5511         work_done = min(work_done, budget);
5512
5513         if (work_done < budget && napi_complete_done(napi, work_done)) {
5514                 unsigned long flags;
5515
5516                 spin_lock_irqsave(&ch->lock, flags);
5517                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5518                 spin_unlock_irqrestore(&ch->lock, flags);
5519         }
5520
5521         return work_done;
5522 }
5523
5524 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5525 {
5526         struct stmmac_channel *ch =
5527                 container_of(napi, struct stmmac_channel, rxtx_napi);
5528         struct stmmac_priv *priv = ch->priv_data;
5529         int rx_done, tx_done, rxtx_done;
5530         u32 chan = ch->index;
5531
5532         priv->xstats.napi_poll++;
5533
5534         tx_done = stmmac_tx_clean(priv, budget, chan);
5535         tx_done = min(tx_done, budget);
5536
5537         rx_done = stmmac_rx_zc(priv, budget, chan);
5538
5539         rxtx_done = max(tx_done, rx_done);
5540
5541         /* If either TX or RX work is not complete, return budget
5542          * and keep polling
5543          */
5544         if (rxtx_done >= budget)
5545                 return budget;
5546
5547         /* all work done, exit the polling mode */
5548         if (napi_complete_done(napi, rxtx_done)) {
5549                 unsigned long flags;
5550
5551                 spin_lock_irqsave(&ch->lock, flags);
5552                 /* Both RX and TX work are complete,
5553                  * so enable both RX & TX IRQs.
5554                  */
5555                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5556                 spin_unlock_irqrestore(&ch->lock, flags);
5557         }
5558
5559         return min(rxtx_done, budget - 1);
5560 }
5561
5562 /**
5563  *  stmmac_tx_timeout
5564  *  @dev : Pointer to net device structure
5565  *  @txqueue: the index of the hanging transmit queue
5566  *  Description: this function is called when a packet transmission fails to
5567  *   complete within a reasonable time. The driver will mark the error in the
5568  *   netdev structure and arrange for the device to be reset to a sane state
5569  *   in order to transmit a new packet.
5570  */
5571 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5572 {
5573         struct stmmac_priv *priv = netdev_priv(dev);
5574
5575         stmmac_global_err(priv);
5576 }
5577
5578 /**
5579  *  stmmac_set_rx_mode - entry point for multicast addressing
5580  *  @dev : pointer to the device structure
5581  *  Description:
5582  *  This function is a driver entry point which gets called by the kernel
5583  *  whenever multicast addresses must be enabled/disabled.
5584  *  Return value:
5585  *  void.
5586  */
5587 static void stmmac_set_rx_mode(struct net_device *dev)
5588 {
5589         struct stmmac_priv *priv = netdev_priv(dev);
5590
5591         stmmac_set_filter(priv, priv->hw, dev);
5592 }
5593
5594 /**
5595  *  stmmac_change_mtu - entry point to change MTU size for the device.
5596  *  @dev : device pointer.
5597  *  @new_mtu : the new MTU size for the device.
5598  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5599  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5600  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5601  *  Return value:
5602  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5603  *  file on failure.
5604  */
5605 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5606 {
5607         struct stmmac_priv *priv = netdev_priv(dev);
5608         int txfifosz = priv->plat->tx_fifo_size;
5609         struct stmmac_dma_conf *dma_conf;
5610         const int mtu = new_mtu;
5611         int ret;
5612
5613         if (txfifosz == 0)
5614                 txfifosz = priv->dma_cap.tx_fifo_size;
5615
5616         txfifosz /= priv->plat->tx_queues_to_use;
5617
5618         if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5619                 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5620                 return -EINVAL;
5621         }
5622
5623         new_mtu = STMMAC_ALIGN(new_mtu);
5624
5625         /* If condition true, FIFO is too small or MTU too large */
5626         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5627                 return -EINVAL;
5628
5629         if (netif_running(dev)) {
5630                 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5631                 /* Try to allocate the new DMA conf with the new mtu */
5632                 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5633                 if (IS_ERR(dma_conf)) {
5634                         netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5635                                    mtu);
5636                         return PTR_ERR(dma_conf);
5637                 }
5638
5639                 stmmac_release(dev);
5640
5641                 ret = __stmmac_open(dev, dma_conf);
5642                 if (ret) {
5643                         free_dma_desc_resources(priv, dma_conf);
5644                         kfree(dma_conf);
5645                         netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5646                         return ret;
5647                 }
5648
5649                 kfree(dma_conf);
5650
5651                 stmmac_set_rx_mode(dev);
5652         }
5653
5654         dev->mtu = mtu;
5655         netdev_update_features(dev);
5656
5657         return 0;
5658 }
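
/*
 * Editorial illustration (not part of the upstream driver): stmmac_change_mtu()
 * is reached through dev_set_mtu(), for example when userspace issues the
 * SIOCSIFMTU ioctl or an equivalent netlink request. A minimal userspace
 * sketch of that trigger, assuming a hypothetical interface name "eth0" and
 * MTU value 3000:
 *
 *        #include <stdio.h>
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/ioctl.h>
 *        #include <sys/socket.h>
 *        #include <net/if.h>
 *
 *        int main(void)
 *        {
 *                struct ifreq ifr = { 0 };
 *                int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *                if (fd < 0)
 *                        return 1;
 *                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *                ifr.ifr_mtu = 3000;
 *                if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
 *                        perror("SIOCSIFMTU");
 *                close(fd);
 *                return 0;
 *        }
 *
 * Note that the request is rejected when XDP is enabled and the new MTU
 * exceeds ETH_DATA_LEN, and that on a running interface the new DMA
 * configuration is allocated before the old one is torn down, so a failed
 * allocation leaves the previous MTU intact.
 */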
5659
5660 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5661                                              netdev_features_t features)
5662 {
5663         struct stmmac_priv *priv = netdev_priv(dev);
5664
5665         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5666                 features &= ~NETIF_F_RXCSUM;
5667
5668         if (!priv->plat->tx_coe)
5669                 features &= ~NETIF_F_CSUM_MASK;
5670
5671         /* Some GMAC devices have buggy Jumbo frame support that
5672          * requires the Tx COE to be disabled for oversized frames
5673          * (due to limited buffer sizes). In this case we disable
5674          * the TX csum insertion in the TDES and do not use SF.
5675          */
5676         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5677                 features &= ~NETIF_F_CSUM_MASK;
5678
5679         /* Disable tso if asked by ethtool */
5680         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5681                 if (features & NETIF_F_TSO)
5682                         priv->tso = true;
5683                 else
5684                         priv->tso = false;
5685         }
5686
5687         return features;
5688 }
5689
5690 static int stmmac_set_features(struct net_device *netdev,
5691                                netdev_features_t features)
5692 {
5693         struct stmmac_priv *priv = netdev_priv(netdev);
5694
5695         /* Keep the COE Type if RX checksum offload is supported */
5696         if (features & NETIF_F_RXCSUM)
5697                 priv->hw->rx_csum = priv->plat->rx_coe;
5698         else
5699                 priv->hw->rx_csum = 0;
5700         /* No check needed because rx_coe has already been set and will be
5701          * fixed up if there is an issue.
5702          */
5703         stmmac_rx_ipc(priv, priv->hw);
5704
5705         if (priv->sph_cap) {
5706                 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5707                 u32 chan;
5708
5709                 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5710                         stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5711         }
5712
5713         return 0;
5714 }
5715
5716 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5717 {
5718         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5719         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5720         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5721         bool *hs_enable = &fpe_cfg->hs_enable;
5722
5723         if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5724                 return;
5725
5726         /* If LP has sent verify mPacket, LP is FPE capable */
5727         if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5728                 if (*lp_state < FPE_STATE_CAPABLE)
5729                         *lp_state = FPE_STATE_CAPABLE;
5730
5731                 /* If the user has requested FPE enable, respond quickly */
5732                 if (*hs_enable)
5733                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5734                                                 MPACKET_RESPONSE);
5735         }
5736
5737         /* If Local has sent verify mPacket, Local is FPE capable */
5738         if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5739                 if (*lo_state < FPE_STATE_CAPABLE)
5740                         *lo_state = FPE_STATE_CAPABLE;
5741         }
5742
5743         /* If LP has sent response mPacket, LP is entering FPE ON */
5744         if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5745                 *lp_state = FPE_STATE_ENTERING_ON;
5746
5747         /* If Local has sent response mPacket, Local is entering FPE ON */
5748         if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5749                 *lo_state = FPE_STATE_ENTERING_ON;
5750
5751         if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5752             !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5753             priv->fpe_wq) {
5754                 queue_work(priv->fpe_wq, &priv->fpe_task);
5755         }
5756 }
5757
5758 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5759 {
5760         u32 rx_cnt = priv->plat->rx_queues_to_use;
5761         u32 tx_cnt = priv->plat->tx_queues_to_use;
5762         u32 queues_count;
5763         u32 queue;
5764         bool xmac;
5765
5766         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5767         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5768
5769         if (priv->irq_wake)
5770                 pm_wakeup_event(priv->device, 0);
5771
5772         if (priv->dma_cap.estsel)
5773                 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5774                                       &priv->xstats, tx_cnt);
5775
5776         if (priv->dma_cap.fpesel) {
5777                 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5778                                                    priv->dev);
5779
5780                 stmmac_fpe_event_status(priv, status);
5781         }
5782
5783         /* To handle the GMAC's own interrupts */
5784         if ((priv->plat->has_gmac) || xmac) {
5785                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5786
5787                 if (unlikely(status)) {
5788                         /* For LPI we need to save the tx status */
5789                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5790                                 priv->tx_path_in_lpi_mode = true;
5791                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5792                                 priv->tx_path_in_lpi_mode = false;
5793                 }
5794
5795                 for (queue = 0; queue < queues_count; queue++) {
5796                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
5797                                                             queue);
5798                 }
5799
5800                 /* PCS link status */
5801                 if (priv->hw->pcs &&
5802                     !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5803                         if (priv->xstats.pcs_link)
5804                                 netif_carrier_on(priv->dev);
5805                         else
5806                                 netif_carrier_off(priv->dev);
5807                 }
5808
5809                 stmmac_timestamp_interrupt(priv, priv);
5810         }
5811 }
5812
5813 /**
5814  *  stmmac_interrupt - main ISR
5815  *  @irq: interrupt number.
5816  *  @dev_id: to pass the net device pointer.
5817  *  Description: this is the main driver interrupt service routine.
5818  *  It can call:
5819  *  o DMA service routine (to manage incoming frame reception and transmission
5820  *    status)
5821  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5822  *    interrupts.
5823  */
5824 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5825 {
5826         struct net_device *dev = (struct net_device *)dev_id;
5827         struct stmmac_priv *priv = netdev_priv(dev);
5828
5829         /* Check if adapter is up */
5830         if (test_bit(STMMAC_DOWN, &priv->state))
5831                 return IRQ_HANDLED;
5832
5833         /* Check if a fatal error happened */
5834         if (stmmac_safety_feat_interrupt(priv))
5835                 return IRQ_HANDLED;
5836
5837         /* To handle Common interrupts */
5838         stmmac_common_interrupt(priv);
5839
5840         /* To handle DMA interrupts */
5841         stmmac_dma_interrupt(priv);
5842
5843         return IRQ_HANDLED;
5844 }
5845
5846 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5847 {
5848         struct net_device *dev = (struct net_device *)dev_id;
5849         struct stmmac_priv *priv = netdev_priv(dev);
5850
5851         if (unlikely(!dev)) {
5852                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5853                 return IRQ_NONE;
5854         }
5855
5856         /* Check if adapter is up */
5857         if (test_bit(STMMAC_DOWN, &priv->state))
5858                 return IRQ_HANDLED;
5859
5860         /* To handle Common interrupts */
5861         stmmac_common_interrupt(priv);
5862
5863         return IRQ_HANDLED;
5864 }
5865
5866 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5867 {
5868         struct net_device *dev = (struct net_device *)dev_id;
5869         struct stmmac_priv *priv = netdev_priv(dev);
5870
5871         if (unlikely(!dev)) {
5872                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5873                 return IRQ_NONE;
5874         }
5875
5876         /* Check if adapter is up */
5877         if (test_bit(STMMAC_DOWN, &priv->state))
5878                 return IRQ_HANDLED;
5879
5880         /* Check if a fatal error happened */
5881         stmmac_safety_feat_interrupt(priv);
5882
5883         return IRQ_HANDLED;
5884 }
5885
5886 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5887 {
5888         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5889         struct stmmac_dma_conf *dma_conf;
5890         int chan = tx_q->queue_index;
5891         struct stmmac_priv *priv;
5892         int status;
5893
5894         dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5895         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5896
5897         if (unlikely(!data)) {
5898                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5899                 return IRQ_NONE;
5900         }
5901
5902         /* Check if adapter is up */
5903         if (test_bit(STMMAC_DOWN, &priv->state))
5904                 return IRQ_HANDLED;
5905
5906         status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5907
5908         if (unlikely(status & tx_hard_error_bump_tc)) {
5909                 /* Try to bump up the dma threshold on this failure */
5910                 stmmac_bump_dma_threshold(priv, chan);
5911         } else if (unlikely(status == tx_hard_error)) {
5912                 stmmac_tx_err(priv, chan);
5913         }
5914
5915         return IRQ_HANDLED;
5916 }
5917
5918 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5919 {
5920         struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5921         struct stmmac_dma_conf *dma_conf;
5922         int chan = rx_q->queue_index;
5923         struct stmmac_priv *priv;
5924
5925         dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5926         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5927
5928         if (unlikely(!data)) {
5929                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5930                 return IRQ_NONE;
5931         }
5932
5933         /* Check if adapter is up */
5934         if (test_bit(STMMAC_DOWN, &priv->state))
5935                 return IRQ_HANDLED;
5936
5937         stmmac_napi_check(priv, chan, DMA_DIR_RX);
5938
5939         return IRQ_HANDLED;
5940 }
5941
5942 #ifdef CONFIG_NET_POLL_CONTROLLER
5943 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5944  * to allow network I/O with interrupts disabled.
5945  */
5946 static void stmmac_poll_controller(struct net_device *dev)
5947 {
5948         struct stmmac_priv *priv = netdev_priv(dev);
5949         int i;
5950
5951         /* If adapter is down, do nothing */
5952         if (test_bit(STMMAC_DOWN, &priv->state))
5953                 return;
5954
5955         if (priv->plat->multi_msi_en) {
5956                 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5957                         stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
5958
5959                 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5960                         stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
5961         } else {
5962                 disable_irq(dev->irq);
5963                 stmmac_interrupt(dev->irq, dev);
5964                 enable_irq(dev->irq);
5965         }
5966 }
5967 #endif
5968
5969 /**
5970  *  stmmac_ioctl - Entry point for the Ioctl
5971  *  @dev: Device pointer.
5972  *  @rq: An IOCTL-specific structure that can contain a pointer to
5973  *  a proprietary structure used to pass information to the driver.
5974  *  @cmd: IOCTL command
5975  *  Description:
5976  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5977  */
5978 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5979 {
5980         struct stmmac_priv *priv = netdev_priv(dev);
5981         int ret = -EOPNOTSUPP;
5982
5983         if (!netif_running(dev))
5984                 return -EINVAL;
5985
5986         switch (cmd) {
5987         case SIOCGMIIPHY:
5988         case SIOCGMIIREG:
5989         case SIOCSMIIREG:
5990                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5991                 break;
5992         case SIOCSHWTSTAMP:
5993                 ret = stmmac_hwtstamp_set(dev, rq);
5994                 break;
5995         case SIOCGHWTSTAMP:
5996                 ret = stmmac_hwtstamp_get(dev, rq);
5997                 break;
5998         default:
5999                 break;
6000         }
6001
6002         return ret;
6003 }
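
/*
 * Editorial illustration (not part of the upstream driver): the
 * SIOCSHWTSTAMP/SIOCGHWTSTAMP cases above are what userspace PTP stacks
 * (e.g. ptp4l) use to enable hardware timestamping. A minimal sketch,
 * assuming a hypothetical interface name "eth0":
 *
 *        #include <stdio.h>
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/ioctl.h>
 *        #include <sys/socket.h>
 *        #include <net/if.h>
 *        #include <linux/sockios.h>
 *        #include <linux/net_tstamp.h>
 *
 *        int main(void)
 *        {
 *                struct hwtstamp_config cfg = {
 *                        .tx_type   = HWTSTAMP_TX_ON,
 *                        .rx_filter = HWTSTAMP_FILTER_ALL,
 *                };
 *                struct ifreq ifr = { 0 };
 *                int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *                if (fd < 0)
 *                        return 1;
 *                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *                ifr.ifr_data = (char *)&cfg;
 *                if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *                        perror("SIOCSHWTSTAMP");
 *                close(fd);
 *                return 0;
 *        }
 *
 * On success the request is handled by stmmac_hwtstamp_set(); SIOCGHWTSTAMP
 * reads the current configuration back through stmmac_hwtstamp_get().
 */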
6004
6005 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6006                                     void *cb_priv)
6007 {
6008         struct stmmac_priv *priv = cb_priv;
6009         int ret = -EOPNOTSUPP;
6010
6011         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6012                 return ret;
6013
6014         __stmmac_disable_all_queues(priv);
6015
6016         switch (type) {
6017         case TC_SETUP_CLSU32:
6018                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6019                 break;
6020         case TC_SETUP_CLSFLOWER:
6021                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6022                 break;
6023         default:
6024                 break;
6025         }
6026
6027         stmmac_enable_all_queues(priv);
6028         return ret;
6029 }
6030
6031 static LIST_HEAD(stmmac_block_cb_list);
6032
6033 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6034                            void *type_data)
6035 {
6036         struct stmmac_priv *priv = netdev_priv(ndev);
6037
6038         switch (type) {
6039         case TC_QUERY_CAPS:
6040                 return stmmac_tc_query_caps(priv, priv, type_data);
6041         case TC_SETUP_BLOCK:
6042                 return flow_block_cb_setup_simple(type_data,
6043                                                   &stmmac_block_cb_list,
6044                                                   stmmac_setup_tc_block_cb,
6045                                                   priv, priv, true);
6046         case TC_SETUP_QDISC_CBS:
6047                 return stmmac_tc_setup_cbs(priv, priv, type_data);
6048         case TC_SETUP_QDISC_TAPRIO:
6049                 return stmmac_tc_setup_taprio(priv, priv, type_data);
6050         case TC_SETUP_QDISC_ETF:
6051                 return stmmac_tc_setup_etf(priv, priv, type_data);
6052         default:
6053                 return -EOPNOTSUPP;
6054         }
6055 }
6056
6057 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6058                                struct net_device *sb_dev)
6059 {
6060         int gso = skb_shinfo(skb)->gso_type;
6061
6062         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6063                 /*
6064                  * There is no way to determine the number of TSO/USO
6065                  * capable queues. Let's always use Queue 0
6066                  * because if TSO/USO is supported then at least this
6067                  * one will be capable.
6068                  */
6069                 return 0;
6070         }
6071
6072         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6073 }
6074
6075 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6076 {
6077         struct stmmac_priv *priv = netdev_priv(ndev);
6078         int ret = 0;
6079
6080         ret = pm_runtime_resume_and_get(priv->device);
6081         if (ret < 0)
6082                 return ret;
6083
6084         ret = eth_mac_addr(ndev, addr);
6085         if (ret)
6086                 goto set_mac_error;
6087
6088         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6089
6090 set_mac_error:
6091         pm_runtime_put(priv->device);
6092
6093         return ret;
6094 }
6095
6096 #ifdef CONFIG_DEBUG_FS
6097 static struct dentry *stmmac_fs_dir;
6098
6099 static void sysfs_display_ring(void *head, int size, int extend_desc,
6100                                struct seq_file *seq, dma_addr_t dma_phy_addr)
6101 {
6102         int i;
6103         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6104         struct dma_desc *p = (struct dma_desc *)head;
6105         dma_addr_t dma_addr;
6106
6107         for (i = 0; i < size; i++) {
6108                 if (extend_desc) {
6109                         dma_addr = dma_phy_addr + i * sizeof(*ep);
6110                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6111                                    i, &dma_addr,
6112                                    le32_to_cpu(ep->basic.des0),
6113                                    le32_to_cpu(ep->basic.des1),
6114                                    le32_to_cpu(ep->basic.des2),
6115                                    le32_to_cpu(ep->basic.des3));
6116                         ep++;
6117                 } else {
6118                         dma_addr = dma_phy_addr + i * sizeof(*p);
6119                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6120                                    i, &dma_addr,
6121                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6122                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6123                         p++;
6124                 }
6125                 seq_printf(seq, "\n");
6126         }
6127 }
6128
6129 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6130 {
6131         struct net_device *dev = seq->private;
6132         struct stmmac_priv *priv = netdev_priv(dev);
6133         u32 rx_count = priv->plat->rx_queues_to_use;
6134         u32 tx_count = priv->plat->tx_queues_to_use;
6135         u32 queue;
6136
6137         if ((dev->flags & IFF_UP) == 0)
6138                 return 0;
6139
6140         for (queue = 0; queue < rx_count; queue++) {
6141                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6142
6143                 seq_printf(seq, "RX Queue %d:\n", queue);
6144
6145                 if (priv->extend_desc) {
6146                         seq_printf(seq, "Extended descriptor ring:\n");
6147                         sysfs_display_ring((void *)rx_q->dma_erx,
6148                                            priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6149                 } else {
6150                         seq_printf(seq, "Descriptor ring:\n");
6151                         sysfs_display_ring((void *)rx_q->dma_rx,
6152                                            priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6153                 }
6154         }
6155
6156         for (queue = 0; queue < tx_count; queue++) {
6157                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6158
6159                 seq_printf(seq, "TX Queue %d:\n", queue);
6160
6161                 if (priv->extend_desc) {
6162                         seq_printf(seq, "Extended descriptor ring:\n");
6163                         sysfs_display_ring((void *)tx_q->dma_etx,
6164                                            priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6165                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6166                         seq_printf(seq, "Descriptor ring:\n");
6167                         sysfs_display_ring((void *)tx_q->dma_tx,
6168                                            priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6169                 }
6170         }
6171
6172         return 0;
6173 }
6174 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6175
6176 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6177 {
6178         struct net_device *dev = seq->private;
6179         struct stmmac_priv *priv = netdev_priv(dev);
6180
6181         if (!priv->hw_cap_support) {
6182                 seq_printf(seq, "DMA HW features not supported\n");
6183                 return 0;
6184         }
6185
6186         seq_printf(seq, "==============================\n");
6187         seq_printf(seq, "\tDMA HW features\n");
6188         seq_printf(seq, "==============================\n");
6189
6190         seq_printf(seq, "\t10/100 Mbps: %s\n",
6191                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6192         seq_printf(seq, "\t1000 Mbps: %s\n",
6193                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
6194         seq_printf(seq, "\tHalf duplex: %s\n",
6195                    (priv->dma_cap.half_duplex) ? "Y" : "N");
6196         seq_printf(seq, "\tHash Filter: %s\n",
6197                    (priv->dma_cap.hash_filter) ? "Y" : "N");
6198         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6199                    (priv->dma_cap.multi_addr) ? "Y" : "N");
6200         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6201                    (priv->dma_cap.pcs) ? "Y" : "N");
6202         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6203                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
6204         seq_printf(seq, "\tPMT Remote wake up: %s\n",
6205                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6206         seq_printf(seq, "\tPMT Magic Frame: %s\n",
6207                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6208         seq_printf(seq, "\tRMON module: %s\n",
6209                    (priv->dma_cap.rmon) ? "Y" : "N");
6210         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6211                    (priv->dma_cap.time_stamp) ? "Y" : "N");
6212         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6213                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
6214         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6215                    (priv->dma_cap.eee) ? "Y" : "N");
6216         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6217         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6218                    (priv->dma_cap.tx_coe) ? "Y" : "N");
6219         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6220                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6221                            (priv->dma_cap.rx_coe) ? "Y" : "N");
6222         } else {
6223                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6224                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6225                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6226                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6227         }
6228         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6229                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6230         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6231                    priv->dma_cap.number_rx_channel);
6232         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6233                    priv->dma_cap.number_tx_channel);
6234         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6235                    priv->dma_cap.number_rx_queues);
6236         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6237                    priv->dma_cap.number_tx_queues);
6238         seq_printf(seq, "\tEnhanced descriptors: %s\n",
6239                    (priv->dma_cap.enh_desc) ? "Y" : "N");
6240         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6241         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6242         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6243         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6244         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6245                    priv->dma_cap.pps_out_num);
6246         seq_printf(seq, "\tSafety Features: %s\n",
6247                    priv->dma_cap.asp ? "Y" : "N");
6248         seq_printf(seq, "\tFlexible RX Parser: %s\n",
6249                    priv->dma_cap.frpsel ? "Y" : "N");
6250         seq_printf(seq, "\tEnhanced Addressing: %d\n",
6251                    priv->dma_cap.host_dma_width);
6252         seq_printf(seq, "\tReceive Side Scaling: %s\n",
6253                    priv->dma_cap.rssen ? "Y" : "N");
6254         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6255                    priv->dma_cap.vlhash ? "Y" : "N");
6256         seq_printf(seq, "\tSplit Header: %s\n",
6257                    priv->dma_cap.sphen ? "Y" : "N");
6258         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6259                    priv->dma_cap.vlins ? "Y" : "N");
6260         seq_printf(seq, "\tDouble VLAN: %s\n",
6261                    priv->dma_cap.dvlan ? "Y" : "N");
6262         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6263                    priv->dma_cap.l3l4fnum);
6264         seq_printf(seq, "\tARP Offloading: %s\n",
6265                    priv->dma_cap.arpoffsel ? "Y" : "N");
6266         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6267                    priv->dma_cap.estsel ? "Y" : "N");
6268         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6269                    priv->dma_cap.fpesel ? "Y" : "N");
6270         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6271                    priv->dma_cap.tbssel ? "Y" : "N");
6272         return 0;
6273 }
6274 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6275
6276 /* Use network device events to rename debugfs file entries.
6277  */
6278 static int stmmac_device_event(struct notifier_block *unused,
6279                                unsigned long event, void *ptr)
6280 {
6281         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6282         struct stmmac_priv *priv = netdev_priv(dev);
6283
6284         if (dev->netdev_ops != &stmmac_netdev_ops)
6285                 goto done;
6286
6287         switch (event) {
6288         case NETDEV_CHANGENAME:
6289                 if (priv->dbgfs_dir)
6290                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6291                                                          priv->dbgfs_dir,
6292                                                          stmmac_fs_dir,
6293                                                          dev->name);
6294                 break;
6295         }
6296 done:
6297         return NOTIFY_DONE;
6298 }
6299
6300 static struct notifier_block stmmac_notifier = {
6301         .notifier_call = stmmac_device_event,
6302 };
6303
6304 static void stmmac_init_fs(struct net_device *dev)
6305 {
6306         struct stmmac_priv *priv = netdev_priv(dev);
6307
6308         rtnl_lock();
6309
6310         /* Create per netdev entries */
6311         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6312
6313         /* Entry to report DMA RX/TX rings */
6314         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6315                             &stmmac_rings_status_fops);
6316
6317         /* Entry to report the DMA HW features */
6318         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6319                             &stmmac_dma_cap_fops);
6320
6321         rtnl_unlock();
6322 }
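
/*
 * Editorial illustration (not part of the upstream driver): the two files
 * created above appear under debugfs, one directory per netdev. Assuming
 * debugfs is mounted at /sys/kernel/debug, the driver root directory is
 * named "stmmaceth" and the interface is called "eth0" (all of which may
 * differ on a given system), the capabilities can be dumped with a trivial
 * reader:
 *
 *        #include <stdio.h>
 *
 *        int main(void)
 *        {
 *                FILE *f = fopen("/sys/kernel/debug/stmmaceth/eth0/dma_cap", "r");
 *                char line[256];
 *
 *                if (!f)
 *                        return 1;
 *                while (fgets(line, sizeof(line), f))
 *                        fputs(line, stdout);
 *                fclose(f);
 *                return 0;
 *        }
 *
 * "descriptors_status" in the same directory dumps the RX/TX descriptor
 * rings through stmmac_rings_status_show() above.
 */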
6323
6324 static void stmmac_exit_fs(struct net_device *dev)
6325 {
6326         struct stmmac_priv *priv = netdev_priv(dev);
6327
6328         debugfs_remove_recursive(priv->dbgfs_dir);
6329 }
6330 #endif /* CONFIG_DEBUG_FS */
6331
6332 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6333 {
6334         unsigned char *data = (unsigned char *)&vid_le;
6335         unsigned char data_byte = 0;
6336         u32 crc = ~0x0;
6337         u32 temp = 0;
6338         int i, bits;
6339
6340         bits = get_bitmask_order(VLAN_VID_MASK);
6341         for (i = 0; i < bits; i++) {
6342                 if ((i % 8) == 0)
6343                         data_byte = data[i / 8];
6344
6345                 temp = ((crc & 1) ^ data_byte) & 1;
6346                 crc >>= 1;
6347                 data_byte >>= 1;
6348
6349                 if (temp)
6350                         crc ^= 0xedb88320;
6351         }
6352
6353         return crc;
6354 }
6355
6356 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6357 {
6358         u32 crc, hash = 0;
6359         __le16 pmatch = 0;
6360         int count = 0;
6361         u16 vid = 0;
6362
6363         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6364                 __le16 vid_le = cpu_to_le16(vid);
6365                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6366                 hash |= (1 << crc);
6367                 count++;
6368         }
6369
6370         if (!priv->dma_cap.vlhash) {
6371                 if (count > 2) /* VID = 0 always passes filter */
6372                         return -EOPNOTSUPP;
6373
6374                 pmatch = cpu_to_le16(vid);
6375                 hash = 0;
6376         }
6377
6378         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6379 }
6380
6381 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6382 {
6383         struct stmmac_priv *priv = netdev_priv(ndev);
6384         bool is_double = false;
6385         int ret;
6386
6387         ret = pm_runtime_resume_and_get(priv->device);
6388         if (ret < 0)
6389                 return ret;
6390
6391         if (be16_to_cpu(proto) == ETH_P_8021AD)
6392                 is_double = true;
6393
6394         set_bit(vid, priv->active_vlans);
6395         ret = stmmac_vlan_update(priv, is_double);
6396         if (ret) {
6397                 clear_bit(vid, priv->active_vlans);
6398                 goto err_pm_put;
6399         }
6400
6401         if (priv->hw->num_vlan) {
6402                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6403                 if (ret)
6404                         goto err_pm_put;
6405         }
6406 err_pm_put:
6407         pm_runtime_put(priv->device);
6408
6409         return ret;
6410 }
6411
6412 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6413 {
6414         struct stmmac_priv *priv = netdev_priv(ndev);
6415         bool is_double = false;
6416         int ret;
6417
6418         ret = pm_runtime_resume_and_get(priv->device);
6419         if (ret < 0)
6420                 return ret;
6421
6422         if (be16_to_cpu(proto) == ETH_P_8021AD)
6423                 is_double = true;
6424
6425         clear_bit(vid, priv->active_vlans);
6426
6427         if (priv->hw->num_vlan) {
6428                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6429                 if (ret)
6430                         goto del_vlan_error;
6431         }
6432
6433         ret = stmmac_vlan_update(priv, is_double);
6434
6435 del_vlan_error:
6436         pm_runtime_put(priv->device);
6437
6438         return ret;
6439 }
6440
6441 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6442 {
6443         struct stmmac_priv *priv = netdev_priv(dev);
6444
6445         switch (bpf->command) {
6446         case XDP_SETUP_PROG:
6447                 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6448         case XDP_SETUP_XSK_POOL:
6449                 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6450                                              bpf->xsk.queue_id);
6451         default:
6452                 return -EOPNOTSUPP;
6453         }
6454 }
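
/*
 * Editorial illustration (not part of the upstream driver): the
 * XDP_SETUP_PROG command above is reached when an XDP program is attached
 * to the interface, e.g. with "ip link set dev eth0 xdp obj prog.o sec xdp"
 * or libbpf's bpf_xdp_attach(). A minimal, hypothetical BPF-C program that
 * could be attached this way:
 *
 *        // prog.c - build with: clang -O2 -g -target bpf -c prog.c -o prog.o
 *        #include <linux/bpf.h>
 *        #include <bpf/bpf_helpers.h>
 *
 *        SEC("xdp")
 *        int xdp_pass_all(struct xdp_md *ctx)
 *        {
 *                return XDP_PASS;
 *        }
 *
 *        char _license[] SEC("license") = "GPL";
 *
 * XDP_SETUP_XSK_POOL is used by the AF_XDP zero-copy path instead and binds
 * an xsk buffer pool to one RX/TX queue pair via stmmac_xdp_setup_pool().
 */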
6455
6456 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6457                            struct xdp_frame **frames, u32 flags)
6458 {
6459         struct stmmac_priv *priv = netdev_priv(dev);
6460         int cpu = smp_processor_id();
6461         struct netdev_queue *nq;
6462         int i, nxmit = 0;
6463         int queue;
6464
6465         if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6466                 return -ENETDOWN;
6467
6468         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6469                 return -EINVAL;
6470
6471         queue = stmmac_xdp_get_tx_queue(priv, cpu);
6472         nq = netdev_get_tx_queue(priv->dev, queue);
6473
6474         __netif_tx_lock(nq, cpu);
6475         /* Avoids TX time-out as we are sharing with slow path */
6476         txq_trans_cond_update(nq);
6477
6478         for (i = 0; i < num_frames; i++) {
6479                 int res;
6480
6481                 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6482                 if (res == STMMAC_XDP_CONSUMED)
6483                         break;
6484
6485                 nxmit++;
6486         }
6487
6488         if (flags & XDP_XMIT_FLUSH) {
6489                 stmmac_flush_tx_descriptors(priv, queue);
6490                 stmmac_tx_timer_arm(priv, queue);
6491         }
6492
6493         __netif_tx_unlock(nq);
6494
6495         return nxmit;
6496 }
6497
6498 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6499 {
6500         struct stmmac_channel *ch = &priv->channel[queue];
6501         unsigned long flags;
6502
6503         spin_lock_irqsave(&ch->lock, flags);
6504         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6505         spin_unlock_irqrestore(&ch->lock, flags);
6506
6507         stmmac_stop_rx_dma(priv, queue);
6508         __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6509 }
6510
6511 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6512 {
6513         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6514         struct stmmac_channel *ch = &priv->channel[queue];
6515         unsigned long flags;
6516         u32 buf_size;
6517         int ret;
6518
6519         ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6520         if (ret) {
6521                 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6522                 return;
6523         }
6524
6525         ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6526         if (ret) {
6527                 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6528                 netdev_err(priv->dev, "Failed to init RX desc.\n");
6529                 return;
6530         }
6531
6532         stmmac_reset_rx_queue(priv, queue);
6533         stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6534
6535         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6536                             rx_q->dma_rx_phy, rx_q->queue_index);
6537
6538         rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6539                              sizeof(struct dma_desc));
6540         stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6541                                rx_q->rx_tail_addr, rx_q->queue_index);
6542
6543         if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6544                 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6545                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6546                                       buf_size,
6547                                       rx_q->queue_index);
6548         } else {
6549                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6550                                       priv->dma_conf.dma_buf_sz,
6551                                       rx_q->queue_index);
6552         }
6553
6554         stmmac_start_rx_dma(priv, queue);
6555
6556         spin_lock_irqsave(&ch->lock, flags);
6557         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6558         spin_unlock_irqrestore(&ch->lock, flags);
6559 }
6560
6561 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6562 {
6563         struct stmmac_channel *ch = &priv->channel[queue];
6564         unsigned long flags;
6565
6566         spin_lock_irqsave(&ch->lock, flags);
6567         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6568         spin_unlock_irqrestore(&ch->lock, flags);
6569
6570         stmmac_stop_tx_dma(priv, queue);
6571         __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6572 }
6573
6574 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6575 {
6576         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6577         struct stmmac_channel *ch = &priv->channel[queue];
6578         unsigned long flags;
6579         int ret;
6580
6581         ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6582         if (ret) {
6583                 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6584                 return;
6585         }
6586
6587         ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6588         if (ret) {
6589                 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6590                 netdev_err(priv->dev, "Failed to init TX desc.\n");
6591                 return;
6592         }
6593
6594         stmmac_reset_tx_queue(priv, queue);
6595         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6596
6597         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6598                             tx_q->dma_tx_phy, tx_q->queue_index);
6599
6600         if (tx_q->tbs & STMMAC_TBS_AVAIL)
6601                 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6602
6603         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6604         stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6605                                tx_q->tx_tail_addr, tx_q->queue_index);
6606
6607         stmmac_start_tx_dma(priv, queue);
6608
6609         spin_lock_irqsave(&ch->lock, flags);
6610         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6611         spin_unlock_irqrestore(&ch->lock, flags);
6612 }
6613
6614 void stmmac_xdp_release(struct net_device *dev)
6615 {
6616         struct stmmac_priv *priv = netdev_priv(dev);
6617         u32 chan;
6618
6619         /* Ensure tx function is not running */
6620         netif_tx_disable(dev);
6621
6622         /* Disable NAPI process */
6623         stmmac_disable_all_queues(priv);
6624
6625         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6626                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6627
6628         /* Free the IRQ lines */
6629         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6630
6631         /* Stop TX/RX DMA channels */
6632         stmmac_stop_all_dma(priv);
6633
6634         /* Release and free the Rx/Tx resources */
6635         free_dma_desc_resources(priv, &priv->dma_conf);
6636
6637         /* Disable the MAC Rx/Tx */
6638         stmmac_mac_set(priv, priv->ioaddr, false);
6639
6640         /* set trans_start so we don't get spurious
6641          * watchdogs during reset
6642          */
6643         netif_trans_update(dev);
6644         netif_carrier_off(dev);
6645 }
6646
6647 int stmmac_xdp_open(struct net_device *dev)
6648 {
6649         struct stmmac_priv *priv = netdev_priv(dev);
6650         u32 rx_cnt = priv->plat->rx_queues_to_use;
6651         u32 tx_cnt = priv->plat->tx_queues_to_use;
6652         u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6653         struct stmmac_rx_queue *rx_q;
6654         struct stmmac_tx_queue *tx_q;
6655         u32 buf_size;
6656         bool sph_en;
6657         u32 chan;
6658         int ret;
6659
6660         ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6661         if (ret < 0) {
6662                 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6663                            __func__);
6664                 goto dma_desc_error;
6665         }
6666
6667         ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6668         if (ret < 0) {
6669                 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6670                            __func__);
6671                 goto init_error;
6672         }
6673
6674         stmmac_reset_queues_param(priv);
6675
6676         /* DMA CSR Channel configuration */
6677         for (chan = 0; chan < dma_csr_ch; chan++) {
6678                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6679                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6680         }
6681
6682         /* Adjust Split header */
6683         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6684
6685         /* DMA RX Channel Configuration */
6686         for (chan = 0; chan < rx_cnt; chan++) {
6687                 rx_q = &priv->dma_conf.rx_queue[chan];
6688
6689                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6690                                     rx_q->dma_rx_phy, chan);
6691
6692                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6693                                      (rx_q->buf_alloc_num *
6694                                       sizeof(struct dma_desc));
6695                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6696                                        rx_q->rx_tail_addr, chan);
6697
6698                 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6699                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6700                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6701                                               buf_size,
6702                                               rx_q->queue_index);
6703                 } else {
6704                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6705                                               priv->dma_conf.dma_buf_sz,
6706                                               rx_q->queue_index);
6707                 }
6708
6709                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6710         }
6711
6712         /* DMA TX Channel Configuration */
6713         for (chan = 0; chan < tx_cnt; chan++) {
6714                 tx_q = &priv->dma_conf.tx_queue[chan];
6715
6716                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6717                                     tx_q->dma_tx_phy, chan);
6718
6719                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6720                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6721                                        tx_q->tx_tail_addr, chan);
6722
6723                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6724                 tx_q->txtimer.function = stmmac_tx_timer;
6725         }
6726
6727         /* Enable the MAC Rx/Tx */
6728         stmmac_mac_set(priv, priv->ioaddr, true);
6729
6730         /* Start Rx & Tx DMA Channels */
6731         stmmac_start_all_dma(priv);
6732
6733         ret = stmmac_request_irq(dev);
6734         if (ret)
6735                 goto irq_error;
6736
6737         /* Enable NAPI process */
6738         stmmac_enable_all_queues(priv);
6739         netif_carrier_on(dev);
6740         netif_tx_start_all_queues(dev);
6741         stmmac_enable_all_dma_irq(priv);
6742
6743         return 0;
6744
6745 irq_error:
6746         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6747                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6748
6749         stmmac_hw_teardown(dev);
6750 init_error:
6751         free_dma_desc_resources(priv, &priv->dma_conf);
6752 dma_desc_error:
6753         return ret;
6754 }
6755
6756 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6757 {
6758         struct stmmac_priv *priv = netdev_priv(dev);
6759         struct stmmac_rx_queue *rx_q;
6760         struct stmmac_tx_queue *tx_q;
6761         struct stmmac_channel *ch;
6762
6763         if (test_bit(STMMAC_DOWN, &priv->state) ||
6764             !netif_carrier_ok(priv->dev))
6765                 return -ENETDOWN;
6766
6767         if (!stmmac_xdp_is_enabled(priv))
6768                 return -EINVAL;
6769
6770         if (queue >= priv->plat->rx_queues_to_use ||
6771             queue >= priv->plat->tx_queues_to_use)
6772                 return -EINVAL;
6773
6774         rx_q = &priv->dma_conf.rx_queue[queue];
6775         tx_q = &priv->dma_conf.tx_queue[queue];
6776         ch = &priv->channel[queue];
6777
6778         if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6779                 return -EINVAL;
6780
6781         if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6782                 /* EQoS does not have a per-DMA channel SW interrupt,
6783                  * so we schedule the rxtx NAPI straight away.
6784                  */
6785                 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6786                         __napi_schedule(&ch->rxtx_napi);
6787         }
6788
6789         return 0;
6790 }
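
/*
 * Editorial illustration (not part of the upstream driver): with an AF_XDP
 * socket bound in need-wakeup mode, userspace is expected to kick the kernel
 * when it has queued new TX descriptors; that kick is what arrives here via
 * .ndo_xsk_wakeup. A minimal sketch, assuming xsk_fd is an already-bound
 * AF_XDP socket file descriptor (for instance one set up with libxdp):
 *
 *        #include <sys/socket.h>
 *
 *        // A zero-length non-blocking send is the conventional wakeup call.
 *        static int kick_tx(int xsk_fd)
 *        {
 *                return sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *        }
 *
 * Because the EQoS DMA has no dedicated per-channel SW interrupt, the
 * handler above simply schedules the channel's rxtx NAPI instance, which
 * then drains both the RX and TX zero-copy rings.
 */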
6791
6792 static const struct net_device_ops stmmac_netdev_ops = {
6793         .ndo_open = stmmac_open,
6794         .ndo_start_xmit = stmmac_xmit,
6795         .ndo_stop = stmmac_release,
6796         .ndo_change_mtu = stmmac_change_mtu,
6797         .ndo_fix_features = stmmac_fix_features,
6798         .ndo_set_features = stmmac_set_features,
6799         .ndo_set_rx_mode = stmmac_set_rx_mode,
6800         .ndo_tx_timeout = stmmac_tx_timeout,
6801         .ndo_eth_ioctl = stmmac_ioctl,
6802         .ndo_setup_tc = stmmac_setup_tc,
6803         .ndo_select_queue = stmmac_select_queue,
6804 #ifdef CONFIG_NET_POLL_CONTROLLER
6805         .ndo_poll_controller = stmmac_poll_controller,
6806 #endif
6807         .ndo_set_mac_address = stmmac_set_mac_address,
6808         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6809         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6810         .ndo_bpf = stmmac_bpf,
6811         .ndo_xdp_xmit = stmmac_xdp_xmit,
6812         .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6813 };
6814
6815 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6816 {
6817         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6818                 return;
6819         if (test_bit(STMMAC_DOWN, &priv->state))
6820                 return;
6821
6822         netdev_err(priv->dev, "Reset adapter.\n");
6823
6824         rtnl_lock();
6825         netif_trans_update(priv->dev);
6826         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6827                 usleep_range(1000, 2000);
6828
6829         set_bit(STMMAC_DOWN, &priv->state);
6830         dev_close(priv->dev);
6831         dev_open(priv->dev, NULL);
6832         clear_bit(STMMAC_DOWN, &priv->state);
6833         clear_bit(STMMAC_RESETING, &priv->state);
6834         rtnl_unlock();
6835 }
6836
6837 static void stmmac_service_task(struct work_struct *work)
6838 {
6839         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6840                         service_task);
6841
6842         stmmac_reset_subtask(priv);
6843         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6844 }
6845
6846 /**
6847  *  stmmac_hw_init - Init the MAC device
6848  *  @priv: driver private structure
6849  *  Description: this function is to configure the MAC device according to
6850  *  some platform parameters or the HW capability register. It prepares the
6851  *  driver to use either ring or chain modes and to setup either enhanced or
6852  *  normal descriptors.
6853  */
6854 static int stmmac_hw_init(struct stmmac_priv *priv)
6855 {
6856         int ret;
6857
6858         /* dwmac-sun8i only works in chain mode */
6859         if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
6860                 chain_mode = 1;
6861         priv->chain_mode = chain_mode;
6862
6863         /* Initialize HW Interface */
6864         ret = stmmac_hwif_init(priv);
6865         if (ret)
6866                 return ret;
6867
6868         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
6869         priv->hw_cap_support = stmmac_get_hw_features(priv);
6870         if (priv->hw_cap_support) {
6871                 dev_info(priv->device, "DMA HW capability register supported\n");
6872
6873                 /* We can override some gmac/dma configuration fields (e.g.
6874                  * enh_desc, tx_coe) that are passed through the
6875                  * platform with the values from the HW capability
6876                  * register (if supported).
6877                  */
6878                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6879                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6880                                 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
6881                 priv->hw->pmt = priv->plat->pmt;
6882                 if (priv->dma_cap.hash_tb_sz) {
6883                         priv->hw->multicast_filter_bins =
6884                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
6885                         priv->hw->mcast_bits_log2 =
6886                                         ilog2(priv->hw->multicast_filter_bins);
6887                 }
6888
6889                 /* TXCOE doesn't work in thresh DMA mode */
6890                 if (priv->plat->force_thresh_dma_mode)
6891                         priv->plat->tx_coe = 0;
6892                 else
6893                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
6894
6895                 /* In case of GMAC4, rx_coe comes from the HW capability register. */
6896                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6897
6898                 if (priv->dma_cap.rx_coe_type2)
6899                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6900                 else if (priv->dma_cap.rx_coe_type1)
6901                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6902
6903         } else {
6904                 dev_info(priv->device, "No HW DMA feature register supported\n");
6905         }
6906
6907         if (priv->plat->rx_coe) {
6908                 priv->hw->rx_csum = priv->plat->rx_coe;
6909                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6910                 if (priv->synopsys_id < DWMAC_CORE_4_00)
6911                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6912         }
6913         if (priv->plat->tx_coe)
6914                 dev_info(priv->device, "TX Checksum insertion supported\n");
6915
6916         if (priv->plat->pmt) {
6917                 dev_info(priv->device, "Wake-Up On LAN supported\n");
6918                 device_set_wakeup_capable(priv->device, 1);
6919         }
6920
6921         if (priv->dma_cap.tsoen)
6922                 dev_info(priv->device, "TSO supported\n");
6923
6924         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6925         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6926
6927         /* Run HW quirks, if any */
6928         if (priv->hwif_quirks) {
6929                 ret = priv->hwif_quirks(priv);
6930                 if (ret)
6931                         return ret;
6932         }
6933
6934         /* Rx Watchdog is available in cores newer than 3.40.
6935          * In some cases, for example on buggy HW, this feature
6936          * has to be disabled; this can be done by passing the
6937          * riwt_off field from the platform.
6938          */
6939         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6940             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6941                 priv->use_riwt = 1;
6942                 dev_info(priv->device,
6943                          "Enable RX Mitigation via HW Watchdog Timer\n");
6944         }
6945
6946         return 0;
6947 }
6948
6949 static void stmmac_napi_add(struct net_device *dev)
6950 {
6951         struct stmmac_priv *priv = netdev_priv(dev);
6952         u32 queue, maxq;
6953
6954         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6955
6956         for (queue = 0; queue < maxq; queue++) {
6957                 struct stmmac_channel *ch = &priv->channel[queue];
6958
6959                 ch->priv_data = priv;
6960                 ch->index = queue;
6961                 spin_lock_init(&ch->lock);
6962
6963                 if (queue < priv->plat->rx_queues_to_use) {
6964                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
6965                 }
6966                 if (queue < priv->plat->tx_queues_to_use) {
6967                         netif_napi_add_tx(dev, &ch->tx_napi,
6968                                           stmmac_napi_poll_tx);
6969                 }
6970                 if (queue < priv->plat->rx_queues_to_use &&
6971                     queue < priv->plat->tx_queues_to_use) {
6972                         netif_napi_add(dev, &ch->rxtx_napi,
6973                                        stmmac_napi_poll_rxtx);
6974                 }
6975         }
6976 }
6977
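/* Remove the per-channel NAPI contexts registered by stmmac_napi_add(). */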
6978 static void stmmac_napi_del(struct net_device *dev)
6979 {
6980         struct stmmac_priv *priv = netdev_priv(dev);
6981         u32 queue, maxq;
6982
6983         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6984
6985         for (queue = 0; queue < maxq; queue++) {
6986                 struct stmmac_channel *ch = &priv->channel[queue];
6987
6988                 if (queue < priv->plat->rx_queues_to_use)
6989                         netif_napi_del(&ch->rx_napi);
6990                 if (queue < priv->plat->tx_queues_to_use)
6991                         netif_napi_del(&ch->tx_napi);
6992                 if (queue < priv->plat->rx_queues_to_use &&
6993                     queue < priv->plat->tx_queues_to_use) {
6994                         netif_napi_del(&ch->rxtx_napi);
6995                 }
6996         }
6997 }
6998
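/* Change the number of RX/TX queues at runtime: close the interface if it
 * is running, rebuild the per-channel NAPI contexts for the new counts,
 * regenerate the default RSS indirection table unless the user configured
 * one, and reopen the interface.
 */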
6999 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7000 {
7001         struct stmmac_priv *priv = netdev_priv(dev);
7002         int ret = 0, i;
7003
7004         if (netif_running(dev))
7005                 stmmac_release(dev);
7006
7007         stmmac_napi_del(dev);
7008
7009         priv->plat->rx_queues_to_use = rx_cnt;
7010         priv->plat->tx_queues_to_use = tx_cnt;
7011         if (!netif_is_rxfh_configured(dev))
7012                 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7013                         priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7014                                                                         rx_cnt);
7015
7016         stmmac_napi_add(dev);
7017
7018         if (netif_running(dev))
7019                 ret = stmmac_open(dev);
7020
7021         return ret;
7022 }
7023
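/* Resize the DMA descriptor rings at runtime: close the interface if it is
 * running, record the new ring sizes in dma_conf, and reopen the interface
 * so the new sizes take effect.
 */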
7024 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7025 {
7026         struct stmmac_priv *priv = netdev_priv(dev);
7027         int ret = 0;
7028
7029         if (netif_running(dev))
7030                 stmmac_release(dev);
7031
7032         priv->dma_conf.dma_rx_size = rx_size;
7033         priv->dma_conf.dma_tx_size = tx_size;
7034
7035         if (netif_running(dev))
7036                 ret = stmmac_open(dev);
7037
7038         return ret;
7039 }
7040
7041 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
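/* Frame Preemption (FPE) link-partner handshake worker: poll up to 20 times,
 * sleeping 500 ms between attempts. While the local state is CAPABLE or
 * ENTERING_ON and the link partner has not reached ON, keep sending verify
 * mPackets; once both sides reach ENTERING_ON, program the FPE configuration
 * and mark both states ON. Bail out as soon as the handshake is switched off.
 */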
7042 static void stmmac_fpe_lp_task(struct work_struct *work)
7043 {
7044         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7045                                                 fpe_task);
7046         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7047         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7048         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7049         bool *hs_enable = &fpe_cfg->hs_enable;
7050         bool *enable = &fpe_cfg->enable;
7051         int retries = 20;
7052
7053         while (retries-- > 0) {
7054                 /* Bail out immediately if FPE handshake is OFF */
7055                 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7056                         break;
7057
7058                 if (*lo_state == FPE_STATE_ENTERING_ON &&
7059                     *lp_state == FPE_STATE_ENTERING_ON) {
7060                         stmmac_fpe_configure(priv, priv->ioaddr,
7061                                              priv->plat->tx_queues_to_use,
7062                                              priv->plat->rx_queues_to_use,
7063                                              *enable);
7064
7065                         netdev_info(priv->dev, "configured FPE\n");
7066
7067                         *lo_state = FPE_STATE_ON;
7068                         *lp_state = FPE_STATE_ON;
7069                         netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7070                         break;
7071                 }
7072
7073                 if ((*lo_state == FPE_STATE_CAPABLE ||
7074                      *lo_state == FPE_STATE_ENTERING_ON) &&
7075                      *lp_state != FPE_STATE_ON) {
7076                         netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7077                                     *lo_state, *lp_state);
7078                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7079                                                 MPACKET_VERIFY);
7080                 }
7081                 /* Sleep then retry */
7082                 msleep(500);
7083         }
7084
7085         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7086 }
7087
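/* Start or stop the FPE verification handshake: when enabling, send a verify
 * mPacket to the link partner; when disabling, reset both the local and the
 * link-partner FPE states to OFF. hs_enable records the requested state.
 */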
7088 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7089 {
7090         if (priv->plat->fpe_cfg->hs_enable != enable) {
7091                 if (enable) {
7092                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7093                                                 MPACKET_VERIFY);
7094                 } else {
7095                         priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7096                         priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7097                 }
7098
7099                 priv->plat->fpe_cfg->hs_enable = enable;
7100         }
7101 }
7102
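/* XDP RX metadata hook: report the hardware RX timestamp of the current
 * descriptor, adjusted by the platform's cdc_error_adj correction. On GMAC4
 * and XGMAC the timestamp is carried in the context (next) descriptor.
 * Returns -ENODATA when RX timestamping is off or no timestamp is available.
 */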
7103 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7104 {
7105         const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7106         struct dma_desc *desc_contains_ts = ctx->desc;
7107         struct stmmac_priv *priv = ctx->priv;
7108         struct dma_desc *ndesc = ctx->ndesc;
7109         struct dma_desc *desc = ctx->desc;
7110         u64 ns = 0;
7111
7112         if (!priv->hwts_rx_en)
7113                 return -ENODATA;
7114
7115         /* For GMAC4/XGMAC, the valid timestamp is held in the context (next) desc. */
7116         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7117                 desc_contains_ts = ndesc;
7118
7119         /* Check if timestamp is available */
7120         if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7121                 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7122                 ns -= priv->plat->cdc_error_adj;
7123                 *timestamp = ns_to_ktime(ns);
7124                 return 0;
7125         }
7126
7127         return -ENODATA;
7128 }
7129
7130 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7131         .xmo_rx_timestamp               = stmmac_xdp_rx_timestamp,
7132 };
7133
7134 /**
7135  * stmmac_dvr_probe
7136  * @device: device pointer
7137  * @plat_dat: platform data pointer
7138  * @res: stmmac resource pointer
7139  * Description: this is the main probe function used to
7140  * call alloc_etherdev and allocate the private structure.
7141  * Return:
7142  * 0 on success, otherwise a negative errno.
7143  */
7144 int stmmac_dvr_probe(struct device *device,
7145                      struct plat_stmmacenet_data *plat_dat,
7146                      struct stmmac_resources *res)
7147 {
7148         struct net_device *ndev = NULL;
7149         struct stmmac_priv *priv;
7150         u32 rxq;
7151         int i, ret = 0;
7152
7153         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7154                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7155         if (!ndev)
7156                 return -ENOMEM;
7157
7158         SET_NETDEV_DEV(ndev, device);
7159
7160         priv = netdev_priv(ndev);
7161         priv->device = device;
7162         priv->dev = ndev;
7163
7164         stmmac_set_ethtool_ops(ndev);
7165         priv->pause = pause;
7166         priv->plat = plat_dat;
7167         priv->ioaddr = res->addr;
7168         priv->dev->base_addr = (unsigned long)res->addr;
7169         priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
7170
7171         priv->dev->irq = res->irq;
7172         priv->wol_irq = res->wol_irq;
7173         priv->lpi_irq = res->lpi_irq;
7174         priv->sfty_ce_irq = res->sfty_ce_irq;
7175         priv->sfty_ue_irq = res->sfty_ue_irq;
7176         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7177                 priv->rx_irq[i] = res->rx_irq[i];
7178         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7179                 priv->tx_irq[i] = res->tx_irq[i];
7180
7181         if (!is_zero_ether_addr(res->mac))
7182                 eth_hw_addr_set(priv->dev, res->mac);
7183
7184         dev_set_drvdata(device, priv->dev);
7185
7186         /* Verify driver arguments */
7187         stmmac_verify_args();
7188
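        /* Bitmap tracking which queue pairs run in AF_XDP zero-copy mode */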
7189         priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7190         if (!priv->af_xdp_zc_qps)
7191                 return -ENOMEM;
7192
7193         /* Allocate workqueue */
7194         priv->wq = create_singlethread_workqueue("stmmac_wq");
7195         if (!priv->wq) {
7196                 dev_err(priv->device, "failed to create workqueue\n");
7197                 ret = -ENOMEM;
7198                 goto error_wq_init;
7199         }
7200
7201         INIT_WORK(&priv->service_task, stmmac_service_task);
7202
7203         /* Initialize Link Partner FPE workqueue */
7204         INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7205
7206         /* Override with kernel parameters if supplied XXX CRS XXX
7207          * this needs to have multiple instances
7208          */
7209         if ((phyaddr >= 0) && (phyaddr <= 31))
7210                 priv->plat->phy_addr = phyaddr;
7211
7212         if (priv->plat->stmmac_rst) {
7213                 ret = reset_control_assert(priv->plat->stmmac_rst);
7214                 reset_control_deassert(priv->plat->stmmac_rst);
7215                 /* Some reset controllers provide only a reset callback
7216                  * instead of the assert + deassert callback pair.
7217                  */
7218                 if (ret == -ENOTSUPP)
7219                         reset_control_reset(priv->plat->stmmac_rst);
7220         }
7221
7222         ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7223         if (ret == -ENOTSUPP)
7224                 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7225                         ERR_PTR(ret));
7226
7227         /* Init MAC and get the capabilities */
7228         ret = stmmac_hw_init(priv);
7229         if (ret)
7230                 goto error_hw_init;
7231
7232         /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7233          */
7234         if (priv->synopsys_id < DWMAC_CORE_5_20)
7235                 priv->plat->dma_cfg->dche = false;
7236
7237         stmmac_check_ether_addr(priv);
7238
7239         ndev->netdev_ops = &stmmac_netdev_ops;
7240
7241         ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7242
7243         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7244                             NETIF_F_RXCSUM;
7245         ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7246                              NETDEV_XDP_ACT_XSK_ZEROCOPY;
7247
7248         ret = stmmac_tc_init(priv, priv);
7249         if (!ret) {
7250                 ndev->hw_features |= NETIF_F_HW_TC;
7251         }
7252
7253         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7254                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7255                 if (priv->plat->has_gmac4)
7256                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7257                 priv->tso = true;
7258                 dev_info(priv->device, "TSO feature enabled\n");
7259         }
7260
7261         if (priv->dma_cap.sphen &&
7262             !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7263                 ndev->hw_features |= NETIF_F_GRO;
7264                 priv->sph_cap = true;
7265                 priv->sph = priv->sph_cap;
7266                 dev_info(priv->device, "SPH feature enabled\n");
7267         }
7268
7269         /* Ideally our host DMA address width is the same as for the
7270          * device. However, it may differ and then we have to use our
7271          * host DMA width for allocation and the device DMA width for
7272          * register handling.
7273          */
7274         if (priv->plat->host_dma_width)
7275                 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7276         else
7277                 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7278
7279         if (priv->dma_cap.host_dma_width) {
7280                 ret = dma_set_mask_and_coherent(device,
7281                                 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7282                 if (!ret) {
7283                         dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7284                                  priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7285
7286                         /*
7287                          * If more than 32 bits can be addressed, make sure to
7288                          * enable enhanced addressing mode.
7289                          */
7290                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7291                                 priv->plat->dma_cfg->eame = true;
7292                 } else {
7293                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7294                         if (ret) {
7295                                 dev_err(priv->device, "Failed to set DMA Mask\n");
7296                                 goto error_hw_init;
7297                         }
7298
7299                         priv->dma_cap.host_dma_width = 32;
7300                 }
7301         }
7302
7303         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7304         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7305 #ifdef STMMAC_VLAN_TAG_USED
7306         /* Both mac100 and gmac support receive VLAN tag detection */
7307         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7308         if (priv->dma_cap.vlhash) {
7309                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7310                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7311         }
7312         if (priv->dma_cap.vlins) {
7313                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7314                 if (priv->dma_cap.dvlan)
7315                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7316         }
7317 #endif
7318         priv->msg_enable = netif_msg_init(debug, default_msg_level);
7319
7320         /* Initialize RSS */
7321         rxq = priv->plat->rx_queues_to_use;
7322         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7323         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7324                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7325
7326         if (priv->dma_cap.rssen && priv->plat->rss_en)
7327                 ndev->features |= NETIF_F_RXHASH;
7328
7329         ndev->vlan_features |= ndev->features;
7330         /* TSO doesn't work on VLANs yet */
7331         ndev->vlan_features &= ~NETIF_F_TSO;
7332
7333         /* MTU range: 46 - hw-specific max */
7334         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7335         if (priv->plat->has_xgmac)
7336                 ndev->max_mtu = XGMAC_JUMBO_LEN;
7337         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7338                 ndev->max_mtu = JUMBO_LEN;
7339         else
7340                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7341         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7342          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7343          */
7344         if ((priv->plat->maxmtu < ndev->max_mtu) &&
7345             (priv->plat->maxmtu >= ndev->min_mtu))
7346                 ndev->max_mtu = priv->plat->maxmtu;
7347         else if (priv->plat->maxmtu < ndev->min_mtu)
7348                 dev_warn(priv->device,
7349                          "%s: warning: maxmtu having invalid value (%d)\n",
7350                          __func__, priv->plat->maxmtu);
7351
7352         if (flow_ctrl)
7353                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
7354
7355         ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7356
7357         /* Setup channels NAPI */
7358         stmmac_napi_add(ndev);
7359
7360         mutex_init(&priv->lock);
7361
7362         /* If a specific clk_csr value is passed from the platform,
7363          * it means that the CSR Clock Range selection cannot be
7364          * changed at run-time and is fixed. Otherwise, the driver
7365          * will try to set the MDC clock dynamically according to
7366          * the actual CSR clock input.
7367          */
7368         if (priv->plat->clk_csr >= 0)
7369                 priv->clk_csr = priv->plat->clk_csr;
7370         else
7371                 stmmac_clk_csr_set(priv);
7372
7373         stmmac_check_pcs_mode(priv);
7374
7375         pm_runtime_get_noresume(device);
7376         pm_runtime_set_active(device);
7377         if (!pm_runtime_enabled(device))
7378                 pm_runtime_enable(device);
7379
7380         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7381             priv->hw->pcs != STMMAC_PCS_RTBI) {
7382                 /* MDIO bus Registration */
7383                 ret = stmmac_mdio_register(ndev);
7384                 if (ret < 0) {
7385                         dev_err_probe(priv->device, ret,
7386                                       "%s: MDIO bus (id: %d) registration failed\n",
7387                                       __func__, priv->plat->bus_id);
7388                         goto error_mdio_register;
7389                 }
7390         }
7391
7392         if (priv->plat->speed_mode_2500)
7393                 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7394
7395         if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7396                 ret = stmmac_xpcs_setup(priv->mii);
7397                 if (ret)
7398                         goto error_xpcs_setup;
7399         }
7400
7401         ret = stmmac_phy_setup(priv);
7402         if (ret) {
7403                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7404                 goto error_phy_setup;
7405         }
7406
7407         ret = register_netdev(ndev);
7408         if (ret) {
7409                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7410                         __func__, ret);
7411                 goto error_netdev_register;
7412         }
7413
7414 #ifdef CONFIG_DEBUG_FS
7415         stmmac_init_fs(ndev);
7416 #endif
7417
7418         if (priv->plat->dump_debug_regs)
7419                 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7420
7421         /* Let pm_runtime_put() disable the clocks.
7422          * If CONFIG_PM is not enabled, the clocks will stay powered.
7423          */
7424         pm_runtime_put(device);
7425
7426         return ret;
7427
7428 error_netdev_register:
7429         phylink_destroy(priv->phylink);
7430 error_xpcs_setup:
7431 error_phy_setup:
7432         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7433             priv->hw->pcs != STMMAC_PCS_RTBI)
7434                 stmmac_mdio_unregister(ndev);
7435 error_mdio_register:
7436         stmmac_napi_del(ndev);
7437 error_hw_init:
7438         destroy_workqueue(priv->wq);
7439 error_wq_init:
7440         bitmap_free(priv->af_xdp_zc_qps);
7441
7442         return ret;
7443 }
7444 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7445
7446 /**
7447  * stmmac_dvr_remove
7448  * @dev: device pointer
7449  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7450  * changes the link status, and releases the DMA descriptor rings.
7451  */
7452 void stmmac_dvr_remove(struct device *dev)
7453 {
7454         struct net_device *ndev = dev_get_drvdata(dev);
7455         struct stmmac_priv *priv = netdev_priv(ndev);
7456
7457         netdev_info(priv->dev, "%s: removing driver", __func__);
7458
7459         pm_runtime_get_sync(dev);
7460
7461         stmmac_stop_all_dma(priv);
7462         stmmac_mac_set(priv, priv->ioaddr, false);
7463         netif_carrier_off(ndev);
7464         unregister_netdev(ndev);
7465
7466 #ifdef CONFIG_DEBUG_FS
7467         stmmac_exit_fs(ndev);
7468 #endif
7469         phylink_destroy(priv->phylink);
7470         if (priv->plat->stmmac_rst)
7471                 reset_control_assert(priv->plat->stmmac_rst);
7472         reset_control_assert(priv->plat->stmmac_ahb_rst);
7473         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7474             priv->hw->pcs != STMMAC_PCS_RTBI)
7475                 stmmac_mdio_unregister(ndev);
7476         destroy_workqueue(priv->wq);
7477         mutex_destroy(&priv->lock);
7478         bitmap_free(priv->af_xdp_zc_qps);
7479
7480         pm_runtime_disable(dev);
7481         pm_runtime_put_noidle(dev);
7482 }
7483 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7484
7485 /**
7486  * stmmac_suspend - suspend callback
7487  * @dev: device pointer
7488  * Description: this function suspends the device; it is called by the
7489  * platform driver to stop the network queue, release the resources,
7490  * program the PMT register (for WoL), and clean up the driver resources.
7491  */
7492 int stmmac_suspend(struct device *dev)
7493 {
7494         struct net_device *ndev = dev_get_drvdata(dev);
7495         struct stmmac_priv *priv = netdev_priv(ndev);
7496         u32 chan;
7497
7498         if (!ndev || !netif_running(ndev))
7499                 return 0;
7500
7501         mutex_lock(&priv->lock);
7502
7503         netif_device_detach(ndev);
7504
7505         stmmac_disable_all_queues(priv);
7506
7507         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7508                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7509
7510         if (priv->eee_enabled) {
7511                 priv->tx_path_in_lpi_mode = false;
7512                 del_timer_sync(&priv->eee_ctrl_timer);
7513         }
7514
7515         /* Stop TX/RX DMA */
7516         stmmac_stop_all_dma(priv);
7517
7518         if (priv->plat->serdes_powerdown)
7519                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7520
7521         /* Enable Power down mode by programming the PMT regs */
7522         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7523                 stmmac_pmt(priv, priv->hw, priv->wolopts);
7524                 priv->irq_wake = 1;
7525         } else {
7526                 stmmac_mac_set(priv, priv->ioaddr, false);
7527                 pinctrl_pm_select_sleep_state(priv->device);
7528         }
7529
7530         mutex_unlock(&priv->lock);
7531
7532         rtnl_lock();
7533         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7534                 phylink_suspend(priv->phylink, true);
7535         } else {
7536                 if (device_may_wakeup(priv->device))
7537                         phylink_speed_down(priv->phylink, false);
7538                 phylink_suspend(priv->phylink, false);
7539         }
7540         rtnl_unlock();
7541
7542         if (priv->dma_cap.fpesel) {
7543                 /* Disable FPE */
7544                 stmmac_fpe_configure(priv, priv->ioaddr,
7545                                      priv->plat->tx_queues_to_use,
7546                                      priv->plat->rx_queues_to_use, false);
7547
7548                 stmmac_fpe_handshake(priv, false);
7549                 stmmac_fpe_stop_wq(priv);
7550         }
7551
7552         priv->speed = SPEED_UNKNOWN;
7553         return 0;
7554 }
7555 EXPORT_SYMBOL_GPL(stmmac_suspend);
7556
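/* The two helpers below rewind the software ring state: the cur/dirty
 * indices for RX, plus the cached MSS and the netdev/BQL TX queue state
 * for TX, so the rings can be reused (e.g. across suspend/resume).
 */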
7557 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7558 {
7559         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7560
7561         rx_q->cur_rx = 0;
7562         rx_q->dirty_rx = 0;
7563 }
7564
7565 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7566 {
7567         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7568
7569         tx_q->cur_tx = 0;
7570         tx_q->dirty_tx = 0;
7571         tx_q->mss = 0;
7572
7573         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7574 }
7575
7576 /**
7577  * stmmac_reset_queues_param - reset queue parameters
7578  * @priv: device pointer
7579  */
7580 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7581 {
7582         u32 rx_cnt = priv->plat->rx_queues_to_use;
7583         u32 tx_cnt = priv->plat->tx_queues_to_use;
7584         u32 queue;
7585
7586         for (queue = 0; queue < rx_cnt; queue++)
7587                 stmmac_reset_rx_queue(priv, queue);
7588
7589         for (queue = 0; queue < tx_cnt; queue++)
7590                 stmmac_reset_tx_queue(priv, queue);
7591 }
7592
7593 /**
7594  * stmmac_resume - resume callback
7595  * @dev: device pointer
7596  * Description: on resume this function is invoked to set up the DMA and CORE
7597  * in a usable state.
7598  */
7599 int stmmac_resume(struct device *dev)
7600 {
7601         struct net_device *ndev = dev_get_drvdata(dev);
7602         struct stmmac_priv *priv = netdev_priv(ndev);
7603         int ret;
7604
7605         if (!netif_running(ndev))
7606                 return 0;
7607
7608         /* The Power Down bit in the PM register is cleared
7609          * automatically as soon as a magic packet or a Wake-up frame
7610          * is received. Anyway, it's better to manually clear
7611          * this bit because it can generate problems while resuming
7612          * from other devices (e.g. a serial console).
7613          */
7614         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7615                 mutex_lock(&priv->lock);
7616                 stmmac_pmt(priv, priv->hw, 0);
7617                 mutex_unlock(&priv->lock);
7618                 priv->irq_wake = 0;
7619         } else {
7620                 pinctrl_pm_select_default_state(priv->device);
7621                 /* reset the phy so that it's ready */
7622                 if (priv->mii)
7623                         stmmac_mdio_reset(priv->mii);
7624         }
7625
7626         if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
7627                 ret = priv->plat->serdes_powerup(ndev,
7628                                                  priv->plat->bsp_priv);
7629
7630                 if (ret < 0)
7631                         return ret;
7632         }
7633
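        /* Resume phylink; if the link speed was lowered for WoL in
         * stmmac_suspend(), restore it now.
         */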
7634         rtnl_lock();
7635         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7636                 phylink_resume(priv->phylink);
7637         } else {
7638                 phylink_resume(priv->phylink);
7639                 if (device_may_wakeup(priv->device))
7640                         phylink_speed_up(priv->phylink);
7641         }
7642         rtnl_unlock();
7643
7644         rtnl_lock();
7645         mutex_lock(&priv->lock);
7646
7647         stmmac_reset_queues_param(priv);
7648
7649         stmmac_free_tx_skbufs(priv);
7650         stmmac_clear_descriptors(priv, &priv->dma_conf);
7651
7652         stmmac_hw_setup(ndev, false);
7653         stmmac_init_coalesce(priv);
7654         stmmac_set_rx_mode(ndev);
7655
7656         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7657
7658         stmmac_enable_all_queues(priv);
7659         stmmac_enable_all_dma_irq(priv);
7660
7661         mutex_unlock(&priv->lock);
7662         rtnl_unlock();
7663
7664         netif_device_attach(ndev);
7665
7666         return 0;
7667 }
7668 EXPORT_SYMBOL_GPL(stmmac_resume);
7669
7670 #ifndef MODULE
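/* Parse the built-in "stmmaceth=" kernel command line: a comma-separated
 * list of "option:value" pairs (debug, phyaddr, buf_sz, tc, watchdog,
 * flow_ctrl, pause, eee_timer, chain_mode) overriding the corresponding
 * module parameter defaults.
 */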
7671 static int __init stmmac_cmdline_opt(char *str)
7672 {
7673         char *opt;
7674
7675         if (!str || !*str)
7676                 return 1;
7677         while ((opt = strsep(&str, ",")) != NULL) {
7678                 if (!strncmp(opt, "debug:", 6)) {
7679                         if (kstrtoint(opt + 6, 0, &debug))
7680                                 goto err;
7681                 } else if (!strncmp(opt, "phyaddr:", 8)) {
7682                         if (kstrtoint(opt + 8, 0, &phyaddr))
7683                                 goto err;
7684                 } else if (!strncmp(opt, "buf_sz:", 7)) {
7685                         if (kstrtoint(opt + 7, 0, &buf_sz))
7686                                 goto err;
7687                 } else if (!strncmp(opt, "tc:", 3)) {
7688                         if (kstrtoint(opt + 3, 0, &tc))
7689                                 goto err;
7690                 } else if (!strncmp(opt, "watchdog:", 9)) {
7691                         if (kstrtoint(opt + 9, 0, &watchdog))
7692                                 goto err;
7693                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7694                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
7695                                 goto err;
7696                 } else if (!strncmp(opt, "pause:", 6)) {
7697                         if (kstrtoint(opt + 6, 0, &pause))
7698                                 goto err;
7699                 } else if (!strncmp(opt, "eee_timer:", 10)) {
7700                         if (kstrtoint(opt + 10, 0, &eee_timer))
7701                                 goto err;
7702                 } else if (!strncmp(opt, "chain_mode:", 11)) {
7703                         if (kstrtoint(opt + 11, 0, &chain_mode))
7704                                 goto err;
7705                 }
7706         }
7707         return 1;
7708
7709 err:
7710         pr_err("%s: ERROR broken module parameter conversion", __func__);
7711         return 1;
7712 }
7713
7714 __setup("stmmaceth=", stmmac_cmdline_opt);
7715 #endif /* MODULE */
7716
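/* Module init/exit only set up and tear down the shared debugfs root and the
 * netdevice notifier; device probe/remove goes through the exported
 * stmmac_dvr_probe() and stmmac_dvr_remove() entry points.
 */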
7717 static int __init stmmac_init(void)
7718 {
7719 #ifdef CONFIG_DEBUG_FS
7720         /* Create debugfs main directory if it doesn't exist yet */
7721         if (!stmmac_fs_dir)
7722                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7723         register_netdevice_notifier(&stmmac_notifier);
7724 #endif
7725
7726         return 0;
7727 }
7728
7729 static void __exit stmmac_exit(void)
7730 {
7731 #ifdef CONFIG_DEBUG_FS
7732         unregister_netdevice_notifier(&stmmac_notifier);
7733         debugfs_remove_recursive(stmmac_fs_dir);
7734 #endif
7735 }
7736
7737 module_init(stmmac_init)
7738 module_exit(stmmac_exit)
7739
7740 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7741 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7742 MODULE_LICENSE("GPL");