platform/kernel/linux-starfive.git: drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52
53 /* As long as the interface is active, we keep the timestamping counter enabled
54  * with fine resolution and binary rollover. This avoids non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58                                  PTP_TCR_TSCTRLSSR)
59
60 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
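/* Illustrative example (assuming 64-byte cache lines): STMMAC_ALIGN(1000) first
 * rounds up to 1024 for the cache line and stays 1024 after the 16-byte align.
 */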
61 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
62
63 /* Module parameters */
64 #define TX_TIMEO        5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76
77 #define STMMAC_TX_THRESH(x)     ((x)->dma_conf.dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)     ((x)->dma_conf.dma_rx_size / 4)
79
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX        256
82 #define STMMAC_TX_XSK_AVAIL             16
83 #define STMMAC_RX_FILL_BATCH            16
84
85 #define STMMAC_XDP_PASS         0
86 #define STMMAC_XDP_CONSUMED     BIT(0)
87 #define STMMAC_XDP_TX           BIT(1)
88 #define STMMAC_XDP_REDIRECT     BIT(2)
89
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102
103 #define DEFAULT_BUFSIZE 1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107
108 #define STMMAC_RX_COPYBREAK     256
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
112                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER        1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver uses ring mode to manage the tx and rx descriptors,
121  * but the user can force use of chain mode instead of ring mode.
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139                                           u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151         int ret = 0;
152
153         if (enabled) {
154                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155                 if (ret)
156                         return ret;
157                 ret = clk_prepare_enable(priv->plat->pclk);
158                 if (ret) {
159                         clk_disable_unprepare(priv->plat->stmmac_clk);
160                         return ret;
161                 }
162                 if (priv->plat->clks_config) {
163                         ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164                         if (ret) {
165                                 clk_disable_unprepare(priv->plat->stmmac_clk);
166                                 clk_disable_unprepare(priv->plat->pclk);
167                                 return ret;
168                         }
169                 }
170         } else {
171                 clk_disable_unprepare(priv->plat->stmmac_clk);
172                 clk_disable_unprepare(priv->plat->pclk);
173                 if (priv->plat->clks_config)
174                         priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175         }
176
177         return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
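/* Illustrative usage sketch (not taken from this file): a caller, for example
 * a suspend/resume or runtime-PM path, is expected to pair the calls:
 *
 *      ret = stmmac_bus_clks_config(priv, true);
 *      if (ret)
 *              return ret;
 *      ...
 *      stmmac_bus_clks_config(priv, false);
 */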
180
181 /**
182  * stmmac_verify_args - verify the driver parameters.
183  * Description: it checks the driver parameters and sets a default in case of
184  * errors.
185  */
186 static void stmmac_verify_args(void)
187 {
188         if (unlikely(watchdog < 0))
189                 watchdog = TX_TIMEO;
190         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
191                 buf_sz = DEFAULT_BUFSIZE;
192         if (unlikely(flow_ctrl > 1))
193                 flow_ctrl = FLOW_AUTO;
194         else if (likely(flow_ctrl < 0))
195                 flow_ctrl = FLOW_OFF;
196         if (unlikely((pause < 0) || (pause > 0xffff)))
197                 pause = PAUSE_TIME;
198         if (eee_timer < 0)
199                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
200 }
201
202 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
203 {
204         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
205         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
207         u32 queue;
208
209         for (queue = 0; queue < maxq; queue++) {
210                 struct stmmac_channel *ch = &priv->channel[queue];
211
212                 if (stmmac_xdp_is_enabled(priv) &&
213                     test_bit(queue, priv->af_xdp_zc_qps)) {
214                         napi_disable(&ch->rxtx_napi);
215                         continue;
216                 }
217
218                 if (queue < rx_queues_cnt)
219                         napi_disable(&ch->rx_napi);
220                 if (queue < tx_queues_cnt)
221                         napi_disable(&ch->tx_napi);
222         }
223 }
224
225 /**
226  * stmmac_disable_all_queues - Disable all queues
227  * @priv: driver private structure
228  */
229 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232         struct stmmac_rx_queue *rx_q;
233         u32 queue;
234
235         /* synchronize_rcu() needed for pending XDP buffers to drain */
236         for (queue = 0; queue < rx_queues_cnt; queue++) {
237                 rx_q = &priv->dma_conf.rx_queue[queue];
238                 if (rx_q->xsk_pool) {
239                         synchronize_rcu();
240                         break;
241                 }
242         }
243
244         __stmmac_disable_all_queues(priv);
245 }
246
247 /**
248  * stmmac_enable_all_queues - Enable all queues
249  * @priv: driver private structure
250  */
251 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
252 {
253         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
254         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
255         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
256         u32 queue;
257
258         for (queue = 0; queue < maxq; queue++) {
259                 struct stmmac_channel *ch = &priv->channel[queue];
260
261                 if (stmmac_xdp_is_enabled(priv) &&
262                     test_bit(queue, priv->af_xdp_zc_qps)) {
263                         napi_enable(&ch->rxtx_napi);
264                         continue;
265                 }
266
267                 if (queue < rx_queues_cnt)
268                         napi_enable(&ch->rx_napi);
269                 if (queue < tx_queues_cnt)
270                         napi_enable(&ch->tx_napi);
271         }
272 }
273
274 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
275 {
276         if (!test_bit(STMMAC_DOWN, &priv->state) &&
277             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
278                 queue_work(priv->wq, &priv->service_task);
279 }
280
281 static void stmmac_global_err(struct stmmac_priv *priv)
282 {
283         netif_carrier_off(priv->dev);
284         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
285         stmmac_service_event_schedule(priv);
286 }
287
288 /**
289  * stmmac_clk_csr_set - dynamically set the MDC clock
290  * @priv: driver private structure
291  * Description: this is to dynamically set the MDC clock according to the csr
292  * clock input.
293  * Note:
294  *      If a specific clk_csr value is passed from the platform
295  *      this means that the CSR Clock Range selection cannot be
296  *      changed at run-time and it is fixed (as reported in the driver
297  *      documentation). Otherwise the driver will try to set the MDC
298  *      clock dynamically according to the actual clock input.
299  */
300 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
301 {
302         u32 clk_rate;
303
304         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
305
306         /* The platform-provided default clk_csr is assumed to be valid
307          * for all cases except the ones mentioned below.
308          * For clock rates higher than the IEEE 802.3 specified frequency
309          * we cannot estimate the proper divider because the frequency of
310          * clk_csr_i is not known, so we do not change the default
311          * divider.
312          */
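        /* Illustrative example: with a 200 MHz csr clock and no fixed
         * platform-provided value, the range checks below select
         * STMMAC_CSR_150_250M as the MDC clock divider setting.
         */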
313         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
314                 if (clk_rate < CSR_F_35M)
315                         priv->clk_csr = STMMAC_CSR_20_35M;
316                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
317                         priv->clk_csr = STMMAC_CSR_35_60M;
318                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
319                         priv->clk_csr = STMMAC_CSR_60_100M;
320                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
321                         priv->clk_csr = STMMAC_CSR_100_150M;
322                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
323                         priv->clk_csr = STMMAC_CSR_150_250M;
324                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
325                         priv->clk_csr = STMMAC_CSR_250_300M;
326         }
327
328         if (priv->plat->has_sun8i) {
329                 if (clk_rate > 160000000)
330                         priv->clk_csr = 0x03;
331                 else if (clk_rate > 80000000)
332                         priv->clk_csr = 0x02;
333                 else if (clk_rate > 40000000)
334                         priv->clk_csr = 0x01;
335                 else
336                         priv->clk_csr = 0;
337         }
338
339         if (priv->plat->has_xgmac) {
340                 if (clk_rate > 400000000)
341                         priv->clk_csr = 0x5;
342                 else if (clk_rate > 350000000)
343                         priv->clk_csr = 0x4;
344                 else if (clk_rate > 300000000)
345                         priv->clk_csr = 0x3;
346                 else if (clk_rate > 250000000)
347                         priv->clk_csr = 0x2;
348                 else if (clk_rate > 150000000)
349                         priv->clk_csr = 0x1;
350                 else
351                         priv->clk_csr = 0x0;
352         }
353 }
354
355 static void print_pkt(unsigned char *buf, int len)
356 {
357         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
358         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
359 }
360
361 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
362 {
363         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
364         u32 avail;
365
366         if (tx_q->dirty_tx > tx_q->cur_tx)
367                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
368         else
369                 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
370
371         return avail;
372 }
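/* Worked example (illustrative): with dma_tx_size = 512, cur_tx = 10 and
 * dirty_tx = 4, dirty_tx is not ahead of cur_tx, so
 * avail = 512 - 10 + 4 - 1 = 505 descriptors are free for the TX path.
 */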
373
374 /**
375  * stmmac_rx_dirty - Get RX queue dirty
376  * @priv: driver private structure
377  * @queue: RX queue index
378  */
379 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
380 {
381         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
382         u32 dirty;
383
384         if (rx_q->dirty_rx <= rx_q->cur_rx)
385                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
386         else
387                 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
388
389         return dirty;
390 }
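/* Worked example (illustrative): with dma_rx_size = 512, cur_rx = 100 and
 * dirty_rx = 90, dirty = 100 - 90 = 10 descriptors have been consumed and
 * are waiting to be refilled before being handed back to the DMA engine.
 */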
391
392 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
393 {
394         int tx_lpi_timer;
395
396         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
397         priv->eee_sw_timer_en = en ? 0 : 1;
398         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
399         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
400 }
401
402 /**
403  * stmmac_enable_eee_mode - check and enter LPI mode
404  * @priv: driver private structure
405  * Description: this function verifies that all TX queues have finished their
406  * work and, if so, enters LPI mode when EEE is in use.
407  */
408 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
409 {
410         u32 tx_cnt = priv->plat->tx_queues_to_use;
411         u32 queue;
412
413         /* check if all TX queues have the work finished */
414         for (queue = 0; queue < tx_cnt; queue++) {
415                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
416
417                 if (tx_q->dirty_tx != tx_q->cur_tx)
418                         return -EBUSY; /* still unfinished work */
419         }
420
421         /* Check and enter in LPI mode */
422         if (!priv->tx_path_in_lpi_mode)
423                 stmmac_set_eee_mode(priv, priv->hw,
424                                 priv->plat->en_tx_lpi_clockgating);
425         return 0;
426 }
427
428 /**
429  * stmmac_disable_eee_mode - disable and exit from LPI mode
430  * @priv: driver private structure
430  * Description: this function exits and disables EEE when the LPI state
431  * is active. It is called from the xmit path.
433  */
434 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
435 {
436         if (!priv->eee_sw_timer_en) {
437                 stmmac_lpi_entry_timer_config(priv, 0);
438                 return;
439         }
440
441         stmmac_reset_eee_mode(priv, priv->hw);
442         del_timer_sync(&priv->eee_ctrl_timer);
443         priv->tx_path_in_lpi_mode = false;
444 }
445
446 /**
447  * stmmac_eee_ctrl_timer - EEE TX SW timer.
448  * @t:  timer_list struct containing private info
449  * Description:
450  *  if there is no data transfer and we are not already in the LPI state,
451  *  then the MAC transmitter can be moved to the LPI state.
452  */
453 static void stmmac_eee_ctrl_timer(struct timer_list *t)
454 {
455         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
456
457         if (stmmac_enable_eee_mode(priv))
458                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
459 }
460
461 /**
462  * stmmac_eee_init - init EEE
463  * @priv: driver private structure
464  * Description:
465  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
466  *  can also manage EEE, this function enables the LPI state and starts the
467  *  related timer.
468  */
469 bool stmmac_eee_init(struct stmmac_priv *priv)
470 {
471         int eee_tw_timer = priv->eee_tw_timer;
472
473         /* When using the PCS we cannot access the PHY registers at this
474          * stage, so we do not support extra features like EEE.
475          */
476         if (priv->hw->pcs == STMMAC_PCS_TBI ||
477             priv->hw->pcs == STMMAC_PCS_RTBI)
478                 return false;
479
480         /* Check if MAC core supports the EEE feature. */
481         if (!priv->dma_cap.eee)
482                 return false;
483
484         mutex_lock(&priv->lock);
485
486         /* Check if it needs to be deactivated */
487         if (!priv->eee_active) {
488                 if (priv->eee_enabled) {
489                         netdev_dbg(priv->dev, "disable EEE\n");
490                         stmmac_lpi_entry_timer_config(priv, 0);
491                         del_timer_sync(&priv->eee_ctrl_timer);
492                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
493                         if (priv->hw->xpcs)
494                                 xpcs_config_eee(priv->hw->xpcs,
495                                                 priv->plat->mult_fact_100ns,
496                                                 false);
497                 }
498                 mutex_unlock(&priv->lock);
499                 return false;
500         }
501
502         if (priv->eee_active && !priv->eee_enabled) {
503                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
504                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
505                                      eee_tw_timer);
506                 if (priv->hw->xpcs)
507                         xpcs_config_eee(priv->hw->xpcs,
508                                         priv->plat->mult_fact_100ns,
509                                         true);
510         }
511
512         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
513                 del_timer_sync(&priv->eee_ctrl_timer);
514                 priv->tx_path_in_lpi_mode = false;
515                 stmmac_lpi_entry_timer_config(priv, 1);
516         } else {
517                 stmmac_lpi_entry_timer_config(priv, 0);
518                 mod_timer(&priv->eee_ctrl_timer,
519                           STMMAC_LPI_T(priv->tx_lpi_timer));
520         }
521
522         mutex_unlock(&priv->lock);
523         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
524         return true;
525 }
526
527 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
528  * @priv: driver private structure
529  * @p : descriptor pointer
530  * @skb : the socket buffer
531  * Description :
532  * This function reads the timestamp from the descriptor, passes it to the
533  * stack, and also performs some sanity checks.
534  */
535 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
536                                    struct dma_desc *p, struct sk_buff *skb)
537 {
538         struct skb_shared_hwtstamps shhwtstamp;
539         bool found = false;
540         u64 ns = 0;
541
542         if (!priv->hwts_tx_en)
543                 return;
544
545         /* exit if skb doesn't support hw tstamp */
546         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
547                 return;
548
549         /* check tx tstamp status */
550         if (stmmac_get_tx_timestamp_status(priv, p)) {
551                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
552                 found = true;
553         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
554                 found = true;
555         }
556
557         if (found) {
558                 ns -= priv->plat->cdc_error_adj;
559
560                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
561                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
562
563                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
564                 /* pass tstamp to stack */
565                 skb_tstamp_tx(skb, &shhwtstamp);
566         }
567 }
568
569 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
570  * @priv: driver private structure
571  * @p : descriptor pointer
572  * @np : next descriptor pointer
573  * @skb : the socket buffer
574  * Description :
575  * This function reads the received packet's timestamp from the descriptor
576  * and passes it to the stack. It also performs some sanity checks.
577  */
578 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
579                                    struct dma_desc *np, struct sk_buff *skb)
580 {
581         struct skb_shared_hwtstamps *shhwtstamp = NULL;
582         struct dma_desc *desc = p;
583         u64 ns = 0;
584
585         if (!priv->hwts_rx_en)
586                 return;
587         /* For GMAC4, the valid timestamp is from CTX next desc. */
588         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
589                 desc = np;
590
591         /* Check if timestamp is available */
592         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
593                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
594
595                 ns -= priv->plat->cdc_error_adj;
596
597                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
598                 shhwtstamp = skb_hwtstamps(skb);
599                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
600                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
601         } else  {
602                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
603         }
604 }
605
606 /**
607  *  stmmac_hwtstamp_set - control hardware timestamping.
608  *  @dev: device pointer.
609  *  @ifr: An ioctl-specific structure that can contain a pointer to
610  *  a proprietary structure used to pass information to the driver.
611  *  Description:
612  *  This function configures the MAC to enable/disable both outgoing (TX)
613  *  and incoming (RX) packet timestamping based on user input.
614  *  Return Value:
615  *  0 on success and an appropriate -ve integer on failure.
616  */
617 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
618 {
619         struct stmmac_priv *priv = netdev_priv(dev);
620         struct hwtstamp_config config;
621         u32 ptp_v2 = 0;
622         u32 tstamp_all = 0;
623         u32 ptp_over_ipv4_udp = 0;
624         u32 ptp_over_ipv6_udp = 0;
625         u32 ptp_over_ethernet = 0;
626         u32 snap_type_sel = 0;
627         u32 ts_master_en = 0;
628         u32 ts_event_en = 0;
629
630         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
631                 netdev_alert(priv->dev, "No support for HW time stamping\n");
632                 priv->hwts_tx_en = 0;
633                 priv->hwts_rx_en = 0;
634
635                 return -EOPNOTSUPP;
636         }
637
638         if (copy_from_user(&config, ifr->ifr_data,
639                            sizeof(config)))
640                 return -EFAULT;
641
642         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
643                    __func__, config.flags, config.tx_type, config.rx_filter);
644
645         if (config.tx_type != HWTSTAMP_TX_OFF &&
646             config.tx_type != HWTSTAMP_TX_ON)
647                 return -ERANGE;
648
649         if (priv->adv_ts) {
650                 switch (config.rx_filter) {
651                 case HWTSTAMP_FILTER_NONE:
652                         /* time stamp no incoming packet at all */
653                         config.rx_filter = HWTSTAMP_FILTER_NONE;
654                         break;
655
656                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
657                         /* PTP v1, UDP, any kind of event packet */
658                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
659                         /* 'xmac' hardware can support Sync, Pdelay_Req and
660                          * Pdelay_resp by setting bit14 and bits17/16 to 01,
661                          * which leaves Delay_Req timestamps out.
662                          * Enable all events *and* general purpose message
663                          * timestamping.
664                          */
665                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
666                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668                         break;
669
670                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
671                         /* PTP v1, UDP, Sync packet */
672                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
673                         /* take time stamp for SYNC messages only */
674                         ts_event_en = PTP_TCR_TSEVNTENA;
675
676                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678                         break;
679
680                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
681                         /* PTP v1, UDP, Delay_req packet */
682                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
683                         /* take time stamp for Delay_Req messages only */
684                         ts_master_en = PTP_TCR_TSMSTRENA;
685                         ts_event_en = PTP_TCR_TSEVNTENA;
686
687                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
688                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
689                         break;
690
691                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
692                         /* PTP v2, UDP, any kind of event packet */
693                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
694                         ptp_v2 = PTP_TCR_TSVER2ENA;
695                         /* take time stamp for all event messages */
696                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
697
698                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
699                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
700                         break;
701
702                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
703                         /* PTP v2, UDP, Sync packet */
704                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
705                         ptp_v2 = PTP_TCR_TSVER2ENA;
706                         /* take time stamp for SYNC messages only */
707                         ts_event_en = PTP_TCR_TSEVNTENA;
708
709                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711                         break;
712
713                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
714                         /* PTP v2, UDP, Delay_req packet */
715                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
716                         ptp_v2 = PTP_TCR_TSVER2ENA;
717                         /* take time stamp for Delay_Req messages only */
718                         ts_master_en = PTP_TCR_TSMSTRENA;
719                         ts_event_en = PTP_TCR_TSEVNTENA;
720
721                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
722                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
723                         break;
724
725                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
726                         /* PTP v2/802.AS1 any layer, any kind of event packet */
727                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
728                         ptp_v2 = PTP_TCR_TSVER2ENA;
729                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
730                         if (priv->synopsys_id < DWMAC_CORE_4_10)
731                                 ts_event_en = PTP_TCR_TSEVNTENA;
732                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
733                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
734                         ptp_over_ethernet = PTP_TCR_TSIPENA;
735                         break;
736
737                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
738                         /* PTP v2/802.AS1, any layer, Sync packet */
739                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
740                         ptp_v2 = PTP_TCR_TSVER2ENA;
741                         /* take time stamp for SYNC messages only */
742                         ts_event_en = PTP_TCR_TSEVNTENA;
743
744                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
745                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
746                         ptp_over_ethernet = PTP_TCR_TSIPENA;
747                         break;
748
749                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
750                         /* PTP v2/802.AS1, any layer, Delay_req packet */
751                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
752                         ptp_v2 = PTP_TCR_TSVER2ENA;
753                         /* take time stamp for Delay_Req messages only */
754                         ts_master_en = PTP_TCR_TSMSTRENA;
755                         ts_event_en = PTP_TCR_TSEVNTENA;
756
757                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
758                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
759                         ptp_over_ethernet = PTP_TCR_TSIPENA;
760                         break;
761
762                 case HWTSTAMP_FILTER_NTP_ALL:
763                 case HWTSTAMP_FILTER_ALL:
764                         /* time stamp any incoming packet */
765                         config.rx_filter = HWTSTAMP_FILTER_ALL;
766                         tstamp_all = PTP_TCR_TSENALL;
767                         break;
768
769                 default:
770                         return -ERANGE;
771                 }
772         } else {
773                 switch (config.rx_filter) {
774                 case HWTSTAMP_FILTER_NONE:
775                         config.rx_filter = HWTSTAMP_FILTER_NONE;
776                         break;
777                 default:
778                         /* PTP v1, UDP, any kind of event packet */
779                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
780                         break;
781                 }
782         }
783         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
784         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
785
786         priv->systime_flags = STMMAC_HWTS_ACTIVE;
787
788         if (priv->hwts_tx_en || priv->hwts_rx_en) {
789                 priv->systime_flags |= tstamp_all | ptp_v2 |
790                                        ptp_over_ethernet | ptp_over_ipv6_udp |
791                                        ptp_over_ipv4_udp | ts_event_en |
792                                        ts_master_en | snap_type_sel;
793         }
794
795         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
796
797         memcpy(&priv->tstamp_config, &config, sizeof(config));
798
799         return copy_to_user(ifr->ifr_data, &config,
800                             sizeof(config)) ? -EFAULT : 0;
801 }
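/* Illustrative userspace usage (not part of this driver): this handler is
 * normally reached through the SIOCSHWTSTAMP ioctl, roughly as follows,
 * where "eth0" and sock_fd are placeholders:
 *
 *      struct hwtstamp_config cfg = {
 *              .tx_type   = HWTSTAMP_TX_ON,
 *              .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *      };
 *      struct ifreq ifr;
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */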
802
803 /**
804  *  stmmac_hwtstamp_get - read hardware timestamping.
805  *  @dev: device pointer.
806  *  @ifr: An ioctl-specific structure that can contain a pointer to
807  *  a proprietary structure used to pass information to the driver.
808  *  Description:
809  *  This function obtains the current hardware timestamping settings
810  *  as requested.
811  */
812 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
813 {
814         struct stmmac_priv *priv = netdev_priv(dev);
815         struct hwtstamp_config *config = &priv->tstamp_config;
816
817         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
818                 return -EOPNOTSUPP;
819
820         return copy_to_user(ifr->ifr_data, config,
821                             sizeof(*config)) ? -EFAULT : 0;
822 }
823
824 /**
825  * stmmac_init_tstamp_counter - init hardware timestamping counter
826  * @priv: driver private structure
827  * @systime_flags: timestamping flags
828  * Description:
829  * Initialize hardware counter for packet timestamping.
830  * This is valid as long as the interface is open and not suspended.
831  * It will be rerun after resuming from suspend, in which case the timestamping
832  * flags updated by stmmac_hwtstamp_set() also need to be restored.
833  */
834 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
835 {
836         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
837         struct timespec64 now;
838         u32 sec_inc = 0;
839         u64 temp = 0;
840
841         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
842                 return -EOPNOTSUPP;
843
844         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
845         priv->systime_flags = systime_flags;
846
847         /* program Sub Second Increment reg */
848         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
849                                            priv->plat->clk_ptp_rate,
850                                            xmac, &sec_inc);
851         temp = div_u64(1000000000ULL, sec_inc);
852
853         /* Store sub second increment for later use */
854         priv->sub_second_inc = sec_inc;
855
856         /* Calculate the default addend value:
857          *
858          *   addend = (2^32) / freq_div_ratio
859          * where freq_div_ratio = clk_ptp_rate / (1e9 ns / sec_inc)
860          */
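        /* Worked example (illustrative, assuming the fine-update helper
         * programs sec_inc = 2e9 / clk_ptp_rate): with a 50 MHz PTP clock,
         * sec_inc = 40 ns, 1e9 / sec_inc = 25000000, and
         * default_addend = (25000000 << 32) / 50000000 = 0x80000000.
         */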
861         temp = (u64)(temp << 32);
862         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
863         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
864
865         /* initialize system time */
866         ktime_get_real_ts64(&now);
867
868         /* lower 32 bits of tv_sec are safe until y2106 */
869         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
870
871         return 0;
872 }
873 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
874
875 /**
876  * stmmac_init_ptp - init PTP
877  * @priv: driver private structure
878  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
879  * This is done by looking at the HW cap. register.
880  * This function also registers the ptp driver.
881  */
882 static int stmmac_init_ptp(struct stmmac_priv *priv)
883 {
884         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
885         int ret;
886
887         if (priv->plat->ptp_clk_freq_config)
888                 priv->plat->ptp_clk_freq_config(priv);
889
890         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
891         if (ret)
892                 return ret;
893
894         priv->adv_ts = 0;
895         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
896         if (xmac && priv->dma_cap.atime_stamp)
897                 priv->adv_ts = 1;
898         /* Dwmac 3.x core with extend_desc can support adv_ts */
899         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
900                 priv->adv_ts = 1;
901
902         if (priv->dma_cap.time_stamp)
903                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
904
905         if (priv->adv_ts)
906                 netdev_info(priv->dev,
907                             "IEEE 1588-2008 Advanced Timestamp supported\n");
908
909         priv->hwts_tx_en = 0;
910         priv->hwts_rx_en = 0;
911
912         return 0;
913 }
914
915 static void stmmac_release_ptp(struct stmmac_priv *priv)
916 {
917         clk_disable_unprepare(priv->plat->clk_ptp_ref);
918         stmmac_ptp_unregister(priv);
919 }
920
921 /**
922  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
923  *  @priv: driver private structure
924  *  @duplex: duplex passed to the next function
925  *  Description: It is used for configuring the flow control in all queues
926  */
927 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
928 {
929         u32 tx_cnt = priv->plat->tx_queues_to_use;
930
931         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
932                         priv->pause, tx_cnt);
933 }
934
935 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
936                                                  phy_interface_t interface)
937 {
938         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
939
940         if (!priv->hw->xpcs)
941                 return NULL;
942
943         return &priv->hw->xpcs->pcs;
944 }
945
946 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
947                               const struct phylink_link_state *state)
948 {
949         /* Nothing to do, xpcs_config() handles everything */
950 }
951
952 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
953 {
954         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
955         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
956         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
957         bool *hs_enable = &fpe_cfg->hs_enable;
958
959         if (is_up && *hs_enable) {
960                 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
961         } else {
962                 *lo_state = FPE_STATE_OFF;
963                 *lp_state = FPE_STATE_OFF;
964         }
965 }
966
967 static void stmmac_mac_link_down(struct phylink_config *config,
968                                  unsigned int mode, phy_interface_t interface)
969 {
970         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
971
972         stmmac_mac_set(priv, priv->ioaddr, false);
973         priv->eee_active = false;
974         priv->tx_lpi_enabled = false;
975         priv->eee_enabled = stmmac_eee_init(priv);
976         stmmac_set_eee_pls(priv, priv->hw, false);
977
978         if (priv->dma_cap.fpesel)
979                 stmmac_fpe_link_state_handle(priv, false);
980 }
981
982 static void stmmac_mac_link_up(struct phylink_config *config,
983                                struct phy_device *phy,
984                                unsigned int mode, phy_interface_t interface,
985                                int speed, int duplex,
986                                bool tx_pause, bool rx_pause)
987 {
988         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989         u32 old_ctrl, ctrl;
990
991         if (priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup)
992                 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
993
994         old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
995         ctrl = old_ctrl & ~priv->hw->link.speed_mask;
996
997         if (interface == PHY_INTERFACE_MODE_USXGMII) {
998                 switch (speed) {
999                 case SPEED_10000:
1000                         ctrl |= priv->hw->link.xgmii.speed10000;
1001                         break;
1002                 case SPEED_5000:
1003                         ctrl |= priv->hw->link.xgmii.speed5000;
1004                         break;
1005                 case SPEED_2500:
1006                         ctrl |= priv->hw->link.xgmii.speed2500;
1007                         break;
1008                 default:
1009                         return;
1010                 }
1011         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1012                 switch (speed) {
1013                 case SPEED_100000:
1014                         ctrl |= priv->hw->link.xlgmii.speed100000;
1015                         break;
1016                 case SPEED_50000:
1017                         ctrl |= priv->hw->link.xlgmii.speed50000;
1018                         break;
1019                 case SPEED_40000:
1020                         ctrl |= priv->hw->link.xlgmii.speed40000;
1021                         break;
1022                 case SPEED_25000:
1023                         ctrl |= priv->hw->link.xlgmii.speed25000;
1024                         break;
1025                 case SPEED_10000:
1026                         ctrl |= priv->hw->link.xgmii.speed10000;
1027                         break;
1028                 case SPEED_2500:
1029                         ctrl |= priv->hw->link.speed2500;
1030                         break;
1031                 case SPEED_1000:
1032                         ctrl |= priv->hw->link.speed1000;
1033                         break;
1034                 default:
1035                         return;
1036                 }
1037         } else {
1038                 switch (speed) {
1039                 case SPEED_2500:
1040                         ctrl |= priv->hw->link.speed2500;
1041                         break;
1042                 case SPEED_1000:
1043                         ctrl |= priv->hw->link.speed1000;
1044                         break;
1045                 case SPEED_100:
1046                         ctrl |= priv->hw->link.speed100;
1047                         break;
1048                 case SPEED_10:
1049                         ctrl |= priv->hw->link.speed10;
1050                         break;
1051                 default:
1052                         return;
1053                 }
1054         }
1055
1056         priv->speed = speed;
1057
1058         if (priv->plat->fix_mac_speed)
1059                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1060
1061         if (!duplex)
1062                 ctrl &= ~priv->hw->link.duplex;
1063         else
1064                 ctrl |= priv->hw->link.duplex;
1065
1066         /* Flow Control operation */
1067         if (rx_pause && tx_pause)
1068                 priv->flow_ctrl = FLOW_AUTO;
1069         else if (rx_pause && !tx_pause)
1070                 priv->flow_ctrl = FLOW_RX;
1071         else if (!rx_pause && tx_pause)
1072                 priv->flow_ctrl = FLOW_TX;
1073         else
1074                 priv->flow_ctrl = FLOW_OFF;
1075
1076         stmmac_mac_flow_ctrl(priv, duplex);
1077
1078         if (ctrl != old_ctrl)
1079                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1080
1081         stmmac_mac_set(priv, priv->ioaddr, true);
1082         if (phy && priv->dma_cap.eee) {
1083                 priv->eee_active =
1084                         phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
1085                 priv->eee_enabled = stmmac_eee_init(priv);
1086                 priv->tx_lpi_enabled = priv->eee_enabled;
1087                 stmmac_set_eee_pls(priv, priv->hw, true);
1088         }
1089
1090         if (priv->dma_cap.fpesel)
1091                 stmmac_fpe_link_state_handle(priv, true);
1092 }
1093
1094 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1095         .mac_select_pcs = stmmac_mac_select_pcs,
1096         .mac_config = stmmac_mac_config,
1097         .mac_link_down = stmmac_mac_link_down,
1098         .mac_link_up = stmmac_mac_link_up,
1099 };
1100
1101 /**
1102  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1103  * @priv: driver private structure
1104  * Description: this is to verify if the HW supports the PCS.
1105  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1106  * configured for the TBI, RTBI, or SGMII PHY interface.
1107  */
1108 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1109 {
1110         int interface = priv->plat->interface;
1111
1112         if (priv->dma_cap.pcs) {
1113                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1114                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1115                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1116                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1117                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1118                         priv->hw->pcs = STMMAC_PCS_RGMII;
1119                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1120                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1121                         priv->hw->pcs = STMMAC_PCS_SGMII;
1122                 }
1123         }
1124 }
1125
1126 /**
1127  * stmmac_init_phy - PHY initialization
1128  * @dev: net device structure
1129  * Description: it initializes the driver's PHY state, and attaches the PHY
1130  * to the mac driver.
1131  *  Return value:
1132  *  0 on success
1133  */
1134 static int stmmac_init_phy(struct net_device *dev)
1135 {
1136         struct stmmac_priv *priv = netdev_priv(dev);
1137         struct fwnode_handle *phy_fwnode;
1138         struct fwnode_handle *fwnode;
1139         int ret;
1140
1141         if (!phylink_expects_phy(priv->phylink))
1142                 return 0;
1143
1144         fwnode = of_fwnode_handle(priv->plat->phylink_node);
1145         if (!fwnode)
1146                 fwnode = dev_fwnode(priv->device);
1147
1148         if (fwnode)
1149                 phy_fwnode = fwnode_get_phy_node(fwnode);
1150         else
1151                 phy_fwnode = NULL;
1152
1153         /* Some DT bindings do not set up the PHY handle. Let's try to
1154          * parse it manually.
1155          */
1156         if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1157                 int addr = priv->plat->phy_addr;
1158                 struct phy_device *phydev;
1159
1160                 if (addr < 0) {
1161                         netdev_err(priv->dev, "no phy found\n");
1162                         return -ENODEV;
1163                 }
1164
1165                 phydev = mdiobus_get_phy(priv->mii, addr);
1166                 if (!phydev) {
1167                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1168                         return -ENODEV;
1169                 }
1170
1171                 ret = phylink_connect_phy(priv->phylink, phydev);
1172         } else {
1173                 fwnode_handle_put(phy_fwnode);
1174                 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1175         }
1176
1177         if (!priv->plat->pmt) {
1178                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1179
1180                 phylink_ethtool_get_wol(priv->phylink, &wol);
1181                 device_set_wakeup_capable(priv->device, !!wol.supported);
1182                 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1183         }
1184
1185         return ret;
1186 }
1187
1188 static int stmmac_phy_setup(struct stmmac_priv *priv)
1189 {
1190         struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1191         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1192         int max_speed = priv->plat->max_speed;
1193         int mode = priv->plat->phy_interface;
1194         struct phylink *phylink;
1195
1196         priv->phylink_config.dev = &priv->dev->dev;
1197         priv->phylink_config.type = PHYLINK_NETDEV;
1198         if (priv->plat->mdio_bus_data)
1199                 priv->phylink_config.ovr_an_inband =
1200                         mdio_bus_data->xpcs_an_inband;
1201
1202         if (!fwnode)
1203                 fwnode = dev_fwnode(priv->device);
1204
1205         /* Set the platform/firmware specified interface mode */
1206         __set_bit(mode, priv->phylink_config.supported_interfaces);
1207
1208         /* If we have an xpcs, it defines which PHY interfaces are supported. */
1209         if (priv->hw->xpcs)
1210                 xpcs_get_interfaces(priv->hw->xpcs,
1211                                     priv->phylink_config.supported_interfaces);
1212
1213         priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1214                 MAC_10 | MAC_100;
1215
1216         if (!max_speed || max_speed >= 1000)
1217                 priv->phylink_config.mac_capabilities |= MAC_1000;
1218
1219         if (priv->plat->has_gmac4) {
1220                 if (!max_speed || max_speed >= 2500)
1221                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1222         } else if (priv->plat->has_xgmac) {
1223                 if (!max_speed || max_speed >= 2500)
1224                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1225                 if (!max_speed || max_speed >= 5000)
1226                         priv->phylink_config.mac_capabilities |= MAC_5000FD;
1227                 if (!max_speed || max_speed >= 10000)
1228                         priv->phylink_config.mac_capabilities |= MAC_10000FD;
1229                 if (!max_speed || max_speed >= 25000)
1230                         priv->phylink_config.mac_capabilities |= MAC_25000FD;
1231                 if (!max_speed || max_speed >= 40000)
1232                         priv->phylink_config.mac_capabilities |= MAC_40000FD;
1233                 if (!max_speed || max_speed >= 50000)
1234                         priv->phylink_config.mac_capabilities |= MAC_50000FD;
1235                 if (!max_speed || max_speed >= 100000)
1236                         priv->phylink_config.mac_capabilities |= MAC_100000FD;
1237         }
1238
1239         /* Half-duplex can only work with a single queue */
1240         if (priv->plat->tx_queues_to_use > 1)
1241                 priv->phylink_config.mac_capabilities &=
1242                         ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1243         priv->phylink_config.mac_managed_pm = true;
1244
1245         phylink = phylink_create(&priv->phylink_config, fwnode,
1246                                  mode, &stmmac_phylink_mac_ops);
1247         if (IS_ERR(phylink))
1248                 return PTR_ERR(phylink);
1249
1250         priv->phylink = phylink;
1251         return 0;
1252 }
1253
1254 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1255                                     struct stmmac_dma_conf *dma_conf)
1256 {
1257         u32 rx_cnt = priv->plat->rx_queues_to_use;
1258         unsigned int desc_size;
1259         void *head_rx;
1260         u32 queue;
1261
1262         /* Display RX rings */
1263         for (queue = 0; queue < rx_cnt; queue++) {
1264                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1265
1266                 pr_info("\tRX Queue %u rings\n", queue);
1267
1268                 if (priv->extend_desc) {
1269                         head_rx = (void *)rx_q->dma_erx;
1270                         desc_size = sizeof(struct dma_extended_desc);
1271                 } else {
1272                         head_rx = (void *)rx_q->dma_rx;
1273                         desc_size = sizeof(struct dma_desc);
1274                 }
1275
1276                 /* Display RX ring */
1277                 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1278                                     rx_q->dma_rx_phy, desc_size);
1279         }
1280 }
1281
1282 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1283                                     struct stmmac_dma_conf *dma_conf)
1284 {
1285         u32 tx_cnt = priv->plat->tx_queues_to_use;
1286         unsigned int desc_size;
1287         void *head_tx;
1288         u32 queue;
1289
1290         /* Display TX rings */
1291         for (queue = 0; queue < tx_cnt; queue++) {
1292                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1293
1294                 pr_info("\tTX Queue %d rings\n", queue);
1295
1296                 if (priv->extend_desc) {
1297                         head_tx = (void *)tx_q->dma_etx;
1298                         desc_size = sizeof(struct dma_extended_desc);
1299                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1300                         head_tx = (void *)tx_q->dma_entx;
1301                         desc_size = sizeof(struct dma_edesc);
1302                 } else {
1303                         head_tx = (void *)tx_q->dma_tx;
1304                         desc_size = sizeof(struct dma_desc);
1305                 }
1306
1307                 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1308                                     tx_q->dma_tx_phy, desc_size);
1309         }
1310 }
1311
1312 static void stmmac_display_rings(struct stmmac_priv *priv,
1313                                  struct stmmac_dma_conf *dma_conf)
1314 {
1315         /* Display RX ring */
1316         stmmac_display_rx_rings(priv, dma_conf);
1317
1318         /* Display TX ring */
1319         stmmac_display_tx_rings(priv, dma_conf);
1320 }
1321
1322 static int stmmac_set_bfsize(int mtu, int bufsize)
1323 {
1324         int ret = bufsize;
1325
1326         if (mtu >= BUF_SIZE_8KiB)
1327                 ret = BUF_SIZE_16KiB;
1328         else if (mtu >= BUF_SIZE_4KiB)
1329                 ret = BUF_SIZE_8KiB;
1330         else if (mtu >= BUF_SIZE_2KiB)
1331                 ret = BUF_SIZE_4KiB;
1332         else if (mtu > DEFAULT_BUFSIZE)
1333                 ret = BUF_SIZE_2KiB;
1334         else
1335                 ret = DEFAULT_BUFSIZE;
1336
1337         return ret;
1338 }
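/* Illustrative examples: a standard 1500-byte MTU keeps the default 1536-byte
 * buffers, while a 3000-byte MTU selects BUF_SIZE_4KiB and a 9000-byte jumbo
 * MTU selects BUF_SIZE_16KiB.
 */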
1339
1340 /**
1341  * stmmac_clear_rx_descriptors - clear RX descriptors
1342  * @priv: driver private structure
1343  * @dma_conf: structure to take the dma data
1344  * @queue: RX queue index
1345  * Description: this function is called to clear the RX descriptors;
1346  * it handles both basic and extended descriptors.
1347  */
1348 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1349                                         struct stmmac_dma_conf *dma_conf,
1350                                         u32 queue)
1351 {
1352         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1353         int i;
1354
1355         /* Clear the RX descriptors */
1356         for (i = 0; i < dma_conf->dma_rx_size; i++)
1357                 if (priv->extend_desc)
1358                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1359                                         priv->use_riwt, priv->mode,
1360                                         (i == dma_conf->dma_rx_size - 1),
1361                                         dma_conf->dma_buf_sz);
1362                 else
1363                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1364                                         priv->use_riwt, priv->mode,
1365                                         (i == dma_conf->dma_rx_size - 1),
1366                                         dma_conf->dma_buf_sz);
1367 }
1368
1369 /**
1370  * stmmac_clear_tx_descriptors - clear tx descriptors
1371  * @priv: driver private structure
1372  * @dma_conf: structure to take the dma data
1373  * @queue: TX queue index.
1374  * Description: this function is called to clear the TX descriptors,
1375  * whether basic or extended descriptors are in use.
1376  */
1377 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1378                                         struct stmmac_dma_conf *dma_conf,
1379                                         u32 queue)
1380 {
1381         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1382         int i;
1383
1384         /* Clear the TX descriptors */
1385         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1386                 int last = (i == (dma_conf->dma_tx_size - 1));
1387                 struct dma_desc *p;
1388
1389                 if (priv->extend_desc)
1390                         p = &tx_q->dma_etx[i].basic;
1391                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1392                         p = &tx_q->dma_entx[i].basic;
1393                 else
1394                         p = &tx_q->dma_tx[i];
1395
1396                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1397         }
1398 }
1399
1400 /**
1401  * stmmac_clear_descriptors - clear descriptors
1402  * @priv: driver private structure
1403  * @dma_conf: structure to take the dma data
1404  * Description: this function is called to clear the TX and RX descriptors,
1405  * whether basic or extended descriptors are in use.
1406  */
1407 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1408                                      struct stmmac_dma_conf *dma_conf)
1409 {
1410         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1411         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1412         u32 queue;
1413
1414         /* Clear the RX descriptors */
1415         for (queue = 0; queue < rx_queue_cnt; queue++)
1416                 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1417
1418         /* Clear the TX descriptors */
1419         for (queue = 0; queue < tx_queue_cnt; queue++)
1420                 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1421 }
1422
1423 /**
1424  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1425  * @priv: driver private structure
1426  * @dma_conf: structure to take the dma data
1427  * @p: descriptor pointer
1428  * @i: descriptor index
1429  * @flags: gfp flag
1430  * @queue: RX queue index
1431  * Description: this function is called to allocate a receive buffer, perform
1432  * the DMA mapping and init the descriptor.
1433  */
1434 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1435                                   struct stmmac_dma_conf *dma_conf,
1436                                   struct dma_desc *p,
1437                                   int i, gfp_t flags, u32 queue)
1438 {
1439         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1440         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1441         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1442
1443         if (priv->dma_cap.host_dma_width <= 32)
1444                 gfp |= GFP_DMA32;
1445
1446         if (!buf->page) {
1447                 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1448                 if (!buf->page)
1449                         return -ENOMEM;
1450                 buf->page_offset = stmmac_rx_offset(priv);
1451         }
1452
1453         if (priv->sph && !buf->sec_page) {
1454                 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1455                 if (!buf->sec_page)
1456                         return -ENOMEM;
1457
1458                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1459                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1460         } else {
1461                 buf->sec_page = NULL;
1462                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1463         }
1464
1465         buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1466
1467         stmmac_set_desc_addr(priv, p, buf->addr);
1468         if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1469                 stmmac_init_desc3(priv, p);
1470
1471         return 0;
1472 }
1473
1474 /**
1475  * stmmac_free_rx_buffer - free RX dma buffers
1476  * @priv: private structure
1477  * @rx_q: RX queue
1478  * @i: buffer index.
1479  */
1480 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1481                                   struct stmmac_rx_queue *rx_q,
1482                                   int i)
1483 {
1484         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1485
1486         if (buf->page)
1487                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1488         buf->page = NULL;
1489
1490         if (buf->sec_page)
1491                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1492         buf->sec_page = NULL;
1493 }
1494
1495 /**
1496  * stmmac_free_tx_buffer - free a TX dma buffer
1497  * @priv: private structure
1498  * @dma_conf: structure to take the dma data
1499  * @queue: TX queue index
1500  * @i: buffer index.
1501  */
1502 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1503                                   struct stmmac_dma_conf *dma_conf,
1504                                   u32 queue, int i)
1505 {
1506         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1507
1508         if (tx_q->tx_skbuff_dma[i].buf &&
1509             tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1510                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1511                         dma_unmap_page(priv->device,
1512                                        tx_q->tx_skbuff_dma[i].buf,
1513                                        tx_q->tx_skbuff_dma[i].len,
1514                                        DMA_TO_DEVICE);
1515                 else
1516                         dma_unmap_single(priv->device,
1517                                          tx_q->tx_skbuff_dma[i].buf,
1518                                          tx_q->tx_skbuff_dma[i].len,
1519                                          DMA_TO_DEVICE);
1520         }
1521
1522         if (tx_q->xdpf[i] &&
1523             (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1524              tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1525                 xdp_return_frame(tx_q->xdpf[i]);
1526                 tx_q->xdpf[i] = NULL;
1527         }
1528
1529         if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1530                 tx_q->xsk_frames_done++;
1531
1532         if (tx_q->tx_skbuff[i] &&
1533             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1534                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1535                 tx_q->tx_skbuff[i] = NULL;
1536         }
1537
1538         tx_q->tx_skbuff_dma[i].buf = 0;
1539         tx_q->tx_skbuff_dma[i].map_as_page = false;
1540 }
1541
1542 /**
1543  * dma_free_rx_skbufs - free RX dma buffers
1544  * @priv: private structure
1545  * @dma_conf: structure to take the dma data
1546  * @queue: RX queue index
1547  */
1548 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1549                                struct stmmac_dma_conf *dma_conf,
1550                                u32 queue)
1551 {
1552         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1553         int i;
1554
1555         for (i = 0; i < dma_conf->dma_rx_size; i++)
1556                 stmmac_free_rx_buffer(priv, rx_q, i);
1557 }
1558
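/**
 * stmmac_alloc_rx_buffers - allocate RX buffers from the page pool (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 * Description: walks the whole RX ring and initializes each descriptor
 * buffer via stmmac_init_rx_buffers(), updating buf_alloc_num as it goes.
 */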
1559 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1560                                    struct stmmac_dma_conf *dma_conf,
1561                                    u32 queue, gfp_t flags)
1562 {
1563         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564         int i;
1565
1566         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1567                 struct dma_desc *p;
1568                 int ret;
1569
1570                 if (priv->extend_desc)
1571                         p = &((rx_q->dma_erx + i)->basic);
1572                 else
1573                         p = rx_q->dma_rx + i;
1574
1575                 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1576                                              queue);
1577                 if (ret)
1578                         return ret;
1579
1580                 rx_q->buf_alloc_num++;
1581         }
1582
1583         return 0;
1584 }
1585
1586 /**
1587  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1588  * @priv: private structure
1589  * @dma_conf: structure to take the dma data
1590  * @queue: RX queue index
1591  */
1592 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1593                                 struct stmmac_dma_conf *dma_conf,
1594                                 u32 queue)
1595 {
1596         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1597         int i;
1598
1599         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1600                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1601
1602                 if (!buf->xdp)
1603                         continue;
1604
1605                 xsk_buff_free(buf->xdp);
1606                 buf->xdp = NULL;
1607         }
1608 }
1609
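/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: fills the RX ring with zero-copy buffers taken from the
 * AF_XDP buffer pool and programs their DMA addresses into the descriptors.
 */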
1610 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1611                                       struct stmmac_dma_conf *dma_conf,
1612                                       u32 queue)
1613 {
1614         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1615         int i;
1616
1617         /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1618          * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1619          * this macro is used to make sure there is no size violation.
1620          */
1621         XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1622
1623         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1624                 struct stmmac_rx_buffer *buf;
1625                 dma_addr_t dma_addr;
1626                 struct dma_desc *p;
1627
1628                 if (priv->extend_desc)
1629                         p = (struct dma_desc *)(rx_q->dma_erx + i);
1630                 else
1631                         p = rx_q->dma_rx + i;
1632
1633                 buf = &rx_q->buf_pool[i];
1634
1635                 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1636                 if (!buf->xdp)
1637                         return -ENOMEM;
1638
1639                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1640                 stmmac_set_desc_addr(priv, p, dma_addr);
1641                 rx_q->buf_alloc_num++;
1642         }
1643
1644         return 0;
1645 }
1646
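/* Return the XSK buffer pool bound to @queue, or NULL when XDP is disabled
 * or the queue is not operating in AF_XDP zero-copy mode.
 */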
1647 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1648 {
1649         if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1650                 return NULL;
1651
1652         return xsk_get_pool_from_qid(priv->dev, queue);
1653 }
1654
1655 /**
1656  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1657  * @priv: driver private structure
1658  * @dma_conf: structure to take the dma data
1659  * @queue: RX queue index
1660  * @flags: gfp flag.
1661  * Description: this function initializes the DMA RX descriptors
1662  * and allocates the socket buffers. It supports the chained and ring
1663  * modes.
1664  */
1665 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1666                                     struct stmmac_dma_conf *dma_conf,
1667                                     u32 queue, gfp_t flags)
1668 {
1669         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1670         int ret;
1671
1672         netif_dbg(priv, probe, priv->dev,
1673                   "(%s) dma_rx_phy=0x%08x\n", __func__,
1674                   (u32)rx_q->dma_rx_phy);
1675
1676         stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1677
1678         xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1679
1680         rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1681
1682         if (rx_q->xsk_pool) {
1683                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1684                                                    MEM_TYPE_XSK_BUFF_POOL,
1685                                                    NULL));
1686                 netdev_info(priv->dev,
1687                             "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1688                             rx_q->queue_index);
1689                 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1690         } else {
1691                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1692                                                    MEM_TYPE_PAGE_POOL,
1693                                                    rx_q->page_pool));
1694                 netdev_info(priv->dev,
1695                             "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1696                             rx_q->queue_index);
1697         }
1698
1699         if (rx_q->xsk_pool) {
1700                 /* RX XDP ZC buffer pool may not be populated, e.g.
1701                  * xdpsock TX-only.
1702                  */
1703                 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1704         } else {
1705                 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1706                 if (ret < 0)
1707                         return -ENOMEM;
1708         }
1709
1710         /* Setup the chained descriptor addresses */
1711         if (priv->mode == STMMAC_CHAIN_MODE) {
1712                 if (priv->extend_desc)
1713                         stmmac_mode_init(priv, rx_q->dma_erx,
1714                                          rx_q->dma_rx_phy,
1715                                          dma_conf->dma_rx_size, 1);
1716                 else
1717                         stmmac_mode_init(priv, rx_q->dma_rx,
1718                                          rx_q->dma_rx_phy,
1719                                          dma_conf->dma_rx_size, 0);
1720         }
1721
1722         return 0;
1723 }
1724
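/**
 * init_dma_rx_desc_rings - init the RX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initializes the RX ring of every queue in use and, on
 * failure, releases the buffers already allocated for the previous queues.
 */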
1725 static int init_dma_rx_desc_rings(struct net_device *dev,
1726                                   struct stmmac_dma_conf *dma_conf,
1727                                   gfp_t flags)
1728 {
1729         struct stmmac_priv *priv = netdev_priv(dev);
1730         u32 rx_count = priv->plat->rx_queues_to_use;
1731         int queue;
1732         int ret;
1733
1734         /* RX INITIALIZATION */
1735         netif_dbg(priv, probe, priv->dev,
1736                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1737
1738         for (queue = 0; queue < rx_count; queue++) {
1739                 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1740                 if (ret)
1741                         goto err_init_rx_buffers;
1742         }
1743
1744         return 0;
1745
1746 err_init_rx_buffers:
1747         while (queue >= 0) {
1748                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1749
1750                 if (rx_q->xsk_pool)
1751                         dma_free_rx_xskbufs(priv, dma_conf, queue);
1752                 else
1753                         dma_free_rx_skbufs(priv, dma_conf, queue);
1754
1755                 rx_q->buf_alloc_num = 0;
1756                 rx_q->xsk_pool = NULL;
1757
1758                 queue--;
1759         }
1760
1761         return ret;
1762 }
1763
1764 /**
1765  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1766  * @priv: driver private structure
1767  * @dma_conf: structure to take the dma data
1768  * @queue: TX queue index
1769  * Description: this function initializes the DMA TX descriptors
1770  * and the per-descriptor bookkeeping state. It supports the chained
1771  * and ring modes.
1772  */
1773 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1774                                     struct stmmac_dma_conf *dma_conf,
1775                                     u32 queue)
1776 {
1777         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1778         int i;
1779
1780         netif_dbg(priv, probe, priv->dev,
1781                   "(%s) dma_tx_phy=0x%08x\n", __func__,
1782                   (u32)tx_q->dma_tx_phy);
1783
1784         /* Setup the chained descriptor addresses */
1785         if (priv->mode == STMMAC_CHAIN_MODE) {
1786                 if (priv->extend_desc)
1787                         stmmac_mode_init(priv, tx_q->dma_etx,
1788                                          tx_q->dma_tx_phy,
1789                                          dma_conf->dma_tx_size, 1);
1790                 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1791                         stmmac_mode_init(priv, tx_q->dma_tx,
1792                                          tx_q->dma_tx_phy,
1793                                          dma_conf->dma_tx_size, 0);
1794         }
1795
1796         tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1797
1798         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1799                 struct dma_desc *p;
1800
1801                 if (priv->extend_desc)
1802                         p = &((tx_q->dma_etx + i)->basic);
1803                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1804                         p = &((tx_q->dma_entx + i)->basic);
1805                 else
1806                         p = tx_q->dma_tx + i;
1807
1808                 stmmac_clear_desc(priv, p);
1809
1810                 tx_q->tx_skbuff_dma[i].buf = 0;
1811                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1812                 tx_q->tx_skbuff_dma[i].len = 0;
1813                 tx_q->tx_skbuff_dma[i].last_segment = false;
1814                 tx_q->tx_skbuff[i] = NULL;
1815         }
1816
1817         return 0;
1818 }
1819
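/**
 * init_dma_tx_desc_rings - init the TX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initializes the TX ring of every queue in use.
 */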
1820 static int init_dma_tx_desc_rings(struct net_device *dev,
1821                                   struct stmmac_dma_conf *dma_conf)
1822 {
1823         struct stmmac_priv *priv = netdev_priv(dev);
1824         u32 tx_queue_cnt;
1825         u32 queue;
1826
1827         tx_queue_cnt = priv->plat->tx_queues_to_use;
1828
1829         for (queue = 0; queue < tx_queue_cnt; queue++)
1830                 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1831
1832         return 0;
1833 }
1834
1835 /**
1836  * init_dma_desc_rings - init the RX/TX descriptor rings
1837  * @dev: net device structure
1838  * @dma_conf: structure to take the dma data
1839  * @flags: gfp flag.
1840  * Description: this function initializes the DMA RX/TX descriptors
1841  * and allocates the socket buffers. It supports the chained and ring
1842  * modes.
1843  */
1844 static int init_dma_desc_rings(struct net_device *dev,
1845                                struct stmmac_dma_conf *dma_conf,
1846                                gfp_t flags)
1847 {
1848         struct stmmac_priv *priv = netdev_priv(dev);
1849         int ret;
1850
1851         ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1852         if (ret)
1853                 return ret;
1854
1855         ret = init_dma_tx_desc_rings(dev, dma_conf);
1856
1857         stmmac_clear_descriptors(priv, dma_conf);
1858
1859         if (netif_msg_hw(priv))
1860                 stmmac_display_rings(priv, dma_conf);
1861
1862         return ret;
1863 }
1864
1865 /**
1866  * dma_free_tx_skbufs - free TX dma buffers
1867  * @priv: private structure
1868  * @dma_conf: structure to take the dma data
1869  * @queue: TX queue index
1870  */
1871 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1872                                struct stmmac_dma_conf *dma_conf,
1873                                u32 queue)
1874 {
1875         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1876         int i;
1877
1878         tx_q->xsk_frames_done = 0;
1879
1880         for (i = 0; i < dma_conf->dma_tx_size; i++)
1881                 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1882
1883         if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1884                 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1885                 tx_q->xsk_frames_done = 0;
1886                 tx_q->xsk_pool = NULL;
1887         }
1888 }
1889
1890 /**
1891  * stmmac_free_tx_skbufs - free TX skb buffers
1892  * @priv: private structure
1893  */
1894 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1895 {
1896         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1897         u32 queue;
1898
1899         for (queue = 0; queue < tx_queue_cnt; queue++)
1900                 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1901 }
1902
1903 /**
1904  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1905  * @priv: private structure
1906  * @dma_conf: structure to take the dma data
1907  * @queue: RX queue index
1908  */
1909 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1910                                          struct stmmac_dma_conf *dma_conf,
1911                                          u32 queue)
1912 {
1913         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1914
1915         /* Release the DMA RX socket buffers */
1916         if (rx_q->xsk_pool)
1917                 dma_free_rx_xskbufs(priv, dma_conf, queue);
1918         else
1919                 dma_free_rx_skbufs(priv, dma_conf, queue);
1920
1921         rx_q->buf_alloc_num = 0;
1922         rx_q->xsk_pool = NULL;
1923
1924         /* Free DMA regions of consistent memory previously allocated */
1925         if (!priv->extend_desc)
1926                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927                                   sizeof(struct dma_desc),
1928                                   rx_q->dma_rx, rx_q->dma_rx_phy);
1929         else
1930                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1931                                   sizeof(struct dma_extended_desc),
1932                                   rx_q->dma_erx, rx_q->dma_rx_phy);
1933
1934         if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1935                 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1936
1937         kfree(rx_q->buf_pool);
1938         if (rx_q->page_pool)
1939                 page_pool_destroy(rx_q->page_pool);
1940 }
1941
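/**
 * free_dma_rx_desc_resources - free RX dma desc resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */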
1942 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1943                                        struct stmmac_dma_conf *dma_conf)
1944 {
1945         u32 rx_count = priv->plat->rx_queues_to_use;
1946         u32 queue;
1947
1948         /* Free RX queue resources */
1949         for (queue = 0; queue < rx_count; queue++)
1950                 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1951 }
1952
1953 /**
1954  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1955  * @priv: private structure
1956  * @dma_conf: structure to take the dma data
1957  * @queue: TX queue index
1958  */
1959 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1960                                          struct stmmac_dma_conf *dma_conf,
1961                                          u32 queue)
1962 {
1963         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1964         size_t size;
1965         void *addr;
1966
1967         /* Release the DMA TX socket buffers */
1968         dma_free_tx_skbufs(priv, dma_conf, queue);
1969
1970         if (priv->extend_desc) {
1971                 size = sizeof(struct dma_extended_desc);
1972                 addr = tx_q->dma_etx;
1973         } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1974                 size = sizeof(struct dma_edesc);
1975                 addr = tx_q->dma_entx;
1976         } else {
1977                 size = sizeof(struct dma_desc);
1978                 addr = tx_q->dma_tx;
1979         }
1980
1981         size *= dma_conf->dma_tx_size;
1982
1983         dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1984
1985         kfree(tx_q->tx_skbuff_dma);
1986         kfree(tx_q->tx_skbuff);
1987 }
1988
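/**
 * free_dma_tx_desc_resources - free TX dma desc resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */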
1989 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1990                                        struct stmmac_dma_conf *dma_conf)
1991 {
1992         u32 tx_count = priv->plat->tx_queues_to_use;
1993         u32 queue;
1994
1995         /* Free TX queue resources */
1996         for (queue = 0; queue < tx_count; queue++)
1997                 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1998 }
1999
2000 /**
2001  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2002  * @priv: private structure
2003  * @dma_conf: structure to take the dma data
2004  * @queue: RX queue index
2005  * Description: according to which descriptor type is in use (extended or
2006  * basic), this function allocates the RX resources for the given queue.
2007  * The receive buffers are pre-allocated from a page pool in order to
2008  * allow a zero-copy mechanism.
2009  */
2010 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2011                                          struct stmmac_dma_conf *dma_conf,
2012                                          u32 queue)
2013 {
2014         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2015         struct stmmac_channel *ch = &priv->channel[queue];
2016         bool xdp_prog = stmmac_xdp_is_enabled(priv);
2017         struct page_pool_params pp_params = { 0 };
2018         unsigned int num_pages;
2019         unsigned int napi_id;
2020         int ret;
2021
2022         rx_q->queue_index = queue;
2023         rx_q->priv_data = priv;
2024
2025         pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2026         pp_params.pool_size = dma_conf->dma_rx_size;
2027         num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2028         pp_params.order = ilog2(num_pages);
2029         pp_params.nid = dev_to_node(priv->device);
2030         pp_params.dev = priv->device;
2031         pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2032         pp_params.offset = stmmac_rx_offset(priv);
2033         pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2034
2035         rx_q->page_pool = page_pool_create(&pp_params);
2036         if (IS_ERR(rx_q->page_pool)) {
2037                 ret = PTR_ERR(rx_q->page_pool);
2038                 rx_q->page_pool = NULL;
2039                 return ret;
2040         }
2041
2042         rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2043                                  sizeof(*rx_q->buf_pool),
2044                                  GFP_KERNEL);
2045         if (!rx_q->buf_pool)
2046                 return -ENOMEM;
2047
2048         if (priv->extend_desc) {
2049                 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2050                                                    dma_conf->dma_rx_size *
2051                                                    sizeof(struct dma_extended_desc),
2052                                                    &rx_q->dma_rx_phy,
2053                                                    GFP_KERNEL);
2054                 if (!rx_q->dma_erx)
2055                         return -ENOMEM;
2056
2057         } else {
2058                 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2059                                                   dma_conf->dma_rx_size *
2060                                                   sizeof(struct dma_desc),
2061                                                   &rx_q->dma_rx_phy,
2062                                                   GFP_KERNEL);
2063                 if (!rx_q->dma_rx)
2064                         return -ENOMEM;
2065         }
2066
2067         if (stmmac_xdp_is_enabled(priv) &&
2068             test_bit(queue, priv->af_xdp_zc_qps))
2069                 napi_id = ch->rxtx_napi.napi_id;
2070         else
2071                 napi_id = ch->rx_napi.napi_id;
2072
2073         ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2074                                rx_q->queue_index,
2075                                napi_id);
2076         if (ret) {
2077                 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2078                 return -EINVAL;
2079         }
2080
2081         return 0;
2082 }
2083
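/**
 * alloc_dma_rx_desc_resources - alloc RX resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the per-queue RX resources and releases whatever
 * was already allocated if any queue fails.
 */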
2084 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2085                                        struct stmmac_dma_conf *dma_conf)
2086 {
2087         u32 rx_count = priv->plat->rx_queues_to_use;
2088         u32 queue;
2089         int ret;
2090
2091         /* RX queues buffers and DMA */
2092         for (queue = 0; queue < rx_count; queue++) {
2093                 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2094                 if (ret)
2095                         goto err_dma;
2096         }
2097
2098         return 0;
2099
2100 err_dma:
2101         free_dma_rx_desc_resources(priv, dma_conf);
2102
2103         return ret;
2104 }
2105
2106 /**
2107  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2108  * @priv: private structure
2109  * @dma_conf: structure to take the dma data
2110  * @queue: TX queue index
2111  * Description: according to which descriptor type is in use (extended,
2112  * enhanced TBS or basic), this function allocates the TX resources for
2113  * the given queue: the descriptor ring and the bookkeeping arrays used
2114  * to track the transmitted buffers.
2115  */
2116 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2117                                          struct stmmac_dma_conf *dma_conf,
2118                                          u32 queue)
2119 {
2120         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2121         size_t size;
2122         void *addr;
2123
2124         tx_q->queue_index = queue;
2125         tx_q->priv_data = priv;
2126
2127         tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2128                                       sizeof(*tx_q->tx_skbuff_dma),
2129                                       GFP_KERNEL);
2130         if (!tx_q->tx_skbuff_dma)
2131                 return -ENOMEM;
2132
2133         tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2134                                   sizeof(struct sk_buff *),
2135                                   GFP_KERNEL);
2136         if (!tx_q->tx_skbuff)
2137                 return -ENOMEM;
2138
2139         if (priv->extend_desc)
2140                 size = sizeof(struct dma_extended_desc);
2141         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2142                 size = sizeof(struct dma_edesc);
2143         else
2144                 size = sizeof(struct dma_desc);
2145
2146         size *= dma_conf->dma_tx_size;
2147
2148         addr = dma_alloc_coherent(priv->device, size,
2149                                   &tx_q->dma_tx_phy, GFP_KERNEL);
2150         if (!addr)
2151                 return -ENOMEM;
2152
2153         if (priv->extend_desc)
2154                 tx_q->dma_etx = addr;
2155         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2156                 tx_q->dma_entx = addr;
2157         else
2158                 tx_q->dma_tx = addr;
2159
2160         return 0;
2161 }
2162
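/**
 * alloc_dma_tx_desc_resources - alloc TX resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the per-queue TX resources and releases whatever
 * was already allocated if any queue fails.
 */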
2163 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2164                                        struct stmmac_dma_conf *dma_conf)
2165 {
2166         u32 tx_count = priv->plat->tx_queues_to_use;
2167         u32 queue;
2168         int ret;
2169
2170         /* TX queues buffers and DMA */
2171         for (queue = 0; queue < tx_count; queue++) {
2172                 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2173                 if (ret)
2174                         goto err_dma;
2175         }
2176
2177         return 0;
2178
2179 err_dma:
2180         free_dma_tx_desc_resources(priv, dma_conf);
2181         return ret;
2182 }
2183
2184 /**
2185  * alloc_dma_desc_resources - alloc TX/RX resources.
2186  * @priv: private structure
2187  * @dma_conf: structure to take the dma data
2188  * Description: according to which descriptor type is in use (extended or
2189  * basic), this function allocates the resources for the TX and RX paths.
2190  * In case of reception, for example, it pre-allocates the RX buffers in
2191  * order to allow a zero-copy mechanism.
2192  */
2193 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2194                                     struct stmmac_dma_conf *dma_conf)
2195 {
2196         /* RX Allocation */
2197         int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2198
2199         if (ret)
2200                 return ret;
2201
2202         ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2203
2204         return ret;
2205 }
2206
2207 /**
2208  * free_dma_desc_resources - free dma desc resources
2209  * @priv: private structure
2210  * @dma_conf: structure to take the dma data
2211  */
2212 static void free_dma_desc_resources(struct stmmac_priv *priv,
2213                                     struct stmmac_dma_conf *dma_conf)
2214 {
2215         /* Release the DMA TX socket buffers */
2216         free_dma_tx_desc_resources(priv, dma_conf);
2217
2218         /* Release the DMA RX socket buffers later
2219          * to ensure all pending XDP_TX buffers are returned.
2220          */
2221         free_dma_rx_desc_resources(priv, dma_conf);
2222 }
2223
2224 /**
2225  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2226  *  @priv: driver private structure
2227  *  Description: It is used for enabling the rx queues in the MAC
2228  */
2229 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2230 {
2231         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2232         int queue;
2233         u8 mode;
2234
2235         for (queue = 0; queue < rx_queues_count; queue++) {
2236                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2237                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2238         }
2239 }
2240
2241 /**
2242  * stmmac_start_rx_dma - start RX DMA channel
2243  * @priv: driver private structure
2244  * @chan: RX channel index
2245  * Description:
2246  * This starts an RX DMA channel
2247  */
2248 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2249 {
2250         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2251         stmmac_start_rx(priv, priv->ioaddr, chan);
2252 }
2253
2254 /**
2255  * stmmac_start_tx_dma - start TX DMA channel
2256  * @priv: driver private structure
2257  * @chan: TX channel index
2258  * Description:
2259  * This starts a TX DMA channel
2260  */
2261 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2262 {
2263         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2264         stmmac_start_tx(priv, priv->ioaddr, chan);
2265 }
2266
2267 /**
2268  * stmmac_stop_rx_dma - stop RX DMA channel
2269  * @priv: driver private structure
2270  * @chan: RX channel index
2271  * Description:
2272  * This stops an RX DMA channel
2273  */
2274 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2275 {
2276         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2277         stmmac_stop_rx(priv, priv->ioaddr, chan);
2278 }
2279
2280 /**
2281  * stmmac_stop_tx_dma - stop TX DMA channel
2282  * @priv: driver private structure
2283  * @chan: TX channel index
2284  * Description:
2285  * This stops a TX DMA channel
2286  */
2287 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2288 {
2289         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2290         stmmac_stop_tx(priv, priv->ioaddr, chan);
2291 }
2292
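/**
 * stmmac_enable_all_dma_irq - enable RX and TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: re-enables the DMA interrupts of every channel, taking the
 * per-channel lock around each register update.
 */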
2293 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2294 {
2295         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2296         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2297         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2298         u32 chan;
2299
2300         for (chan = 0; chan < dma_csr_ch; chan++) {
2301                 struct stmmac_channel *ch = &priv->channel[chan];
2302                 unsigned long flags;
2303
2304                 spin_lock_irqsave(&ch->lock, flags);
2305                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2306                 spin_unlock_irqrestore(&ch->lock, flags);
2307         }
2308 }
2309
2310 /**
2311  * stmmac_start_all_dma - start all RX and TX DMA channels
2312  * @priv: driver private structure
2313  * Description:
2314  * This starts all the RX and TX DMA channels
2315  */
2316 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2317 {
2318         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2319         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2320         u32 chan = 0;
2321
2322         for (chan = 0; chan < rx_channels_count; chan++)
2323                 stmmac_start_rx_dma(priv, chan);
2324
2325         for (chan = 0; chan < tx_channels_count; chan++)
2326                 stmmac_start_tx_dma(priv, chan);
2327 }
2328
2329 /**
2330  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2331  * @priv: driver private structure
2332  * Description:
2333  * This stops the RX and TX DMA channels
2334  */
2335 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2336 {
2337         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2338         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2339         u32 chan = 0;
2340
2341         for (chan = 0; chan < rx_channels_count; chan++)
2342                 stmmac_stop_rx_dma(priv, chan);
2343
2344         for (chan = 0; chan < tx_channels_count; chan++)
2345                 stmmac_stop_tx_dma(priv, chan);
2346 }
2347
2348 /**
2349  *  stmmac_dma_operation_mode - HW DMA operation mode
2350  *  @priv: driver private structure
2351  *  Description: it is used for configuring the DMA operation mode register in
2352  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2353  */
2354 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2355 {
2356         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2357         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2358         int rxfifosz = priv->plat->rx_fifo_size;
2359         int txfifosz = priv->plat->tx_fifo_size;
2360         u32 txmode = 0;
2361         u32 rxmode = 0;
2362         u32 chan = 0;
2363         u8 qmode = 0;
2364
2365         if (rxfifosz == 0)
2366                 rxfifosz = priv->dma_cap.rx_fifo_size;
2367         if (txfifosz == 0)
2368                 txfifosz = priv->dma_cap.tx_fifo_size;
2369
2370         /* Adjust for real per queue fifo size */
2371         rxfifosz /= rx_channels_count;
2372         txfifosz /= tx_channels_count;
2373
2374         if (priv->plat->force_thresh_dma_mode) {
2375                 txmode = tc;
2376                 rxmode = tc;
2377         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2378                 /*
2379                  * In case of GMAC, SF mode can be enabled
2380                  * to perform the TX COE in HW. This depends on:
2381                  * 1) TX COE being actually supported
2382                  * 2) there being no buggy Jumbo frame support
2383                  *    that requires not inserting the csum in the TDES.
2384                  */
2385                 txmode = SF_DMA_MODE;
2386                 rxmode = SF_DMA_MODE;
2387                 priv->xstats.threshold = SF_DMA_MODE;
2388         } else {
2389                 txmode = tc;
2390                 rxmode = SF_DMA_MODE;
2391         }
2392
2393         /* configure all channels */
2394         for (chan = 0; chan < rx_channels_count; chan++) {
2395                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2396                 u32 buf_size;
2397
2398                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2399
2400                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2401                                 rxfifosz, qmode);
2402
2403                 if (rx_q->xsk_pool) {
2404                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2405                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2406                                               buf_size,
2407                                               chan);
2408                 } else {
2409                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2410                                               priv->dma_conf.dma_buf_sz,
2411                                               chan);
2412                 }
2413         }
2414
2415         for (chan = 0; chan < tx_channels_count; chan++) {
2416                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2417
2418                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2419                                 txfifosz, qmode);
2420         }
2421 }
2422
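/**
 * stmmac_xdp_xmit_zc - submit pending AF_XDP zero-copy TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of XSK descriptors to submit
 * Description: peeks descriptors from the XSK TX ring, maps them onto
 * hardware TX descriptors and kicks the DMA. Returns true when the TX
 * budget is not exhausted and no more XSK descriptors are pending.
 */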
2423 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2424 {
2425         struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2426         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2427         struct xsk_buff_pool *pool = tx_q->xsk_pool;
2428         unsigned int entry = tx_q->cur_tx;
2429         struct dma_desc *tx_desc = NULL;
2430         struct xdp_desc xdp_desc;
2431         bool work_done = true;
2432
2433         /* Avoids TX time-out as we are sharing with slow path */
2434         txq_trans_cond_update(nq);
2435
2436         budget = min(budget, stmmac_tx_avail(priv, queue));
2437
2438         while (budget-- > 0) {
2439                 dma_addr_t dma_addr;
2440                 bool set_ic;
2441
2442                 /* We share the ring with the slow path, so stop XSK TX desc
2443                  * submission when the available TX ring space is below the threshold.
2444                  */
2445                 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2446                     !netif_carrier_ok(priv->dev)) {
2447                         work_done = false;
2448                         break;
2449                 }
2450
2451                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2452                         break;
2453
2454                 if (likely(priv->extend_desc))
2455                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2456                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2457                         tx_desc = &tx_q->dma_entx[entry].basic;
2458                 else
2459                         tx_desc = tx_q->dma_tx + entry;
2460
2461                 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2462                 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2463
2464                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2465
2466                 /* To return the XDP buffer to the XSK pool, we simply call
2467                  * xsk_tx_completed(), so we don't need to fill up
2468                  * 'buf' and 'xdpf'.
2469                  */
2470                 tx_q->tx_skbuff_dma[entry].buf = 0;
2471                 tx_q->xdpf[entry] = NULL;
2472
2473                 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2474                 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2475                 tx_q->tx_skbuff_dma[entry].last_segment = true;
2476                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2477
2478                 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2479
2480                 tx_q->tx_count_frames++;
2481
2482                 if (!priv->tx_coal_frames[queue])
2483                         set_ic = false;
2484                 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2485                         set_ic = true;
2486                 else
2487                         set_ic = false;
2488
2489                 if (set_ic) {
2490                         tx_q->tx_count_frames = 0;
2491                         stmmac_set_tx_ic(priv, tx_desc);
2492                         priv->xstats.tx_set_ic_bit++;
2493                 }
2494
2495                 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2496                                        true, priv->mode, true, true,
2497                                        xdp_desc.len);
2498
2499                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2500
2501                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2502                 entry = tx_q->cur_tx;
2503         }
2504
2505         if (tx_desc) {
2506                 stmmac_flush_tx_descriptors(priv, queue);
2507                 xsk_tx_release(pool);
2508         }
2509
2510         /* Return true if both of the following conditions are met
2511          *  a) TX budget is still available
2512          *  b) work_done == true, i.e. the XSK TX desc peek came up empty
2513          *     (no more pending XSK TX for transmission)
2514          */
2515         return !!budget && work_done;
2516 }
2517
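/**
 * stmmac_bump_dma_threshold - raise the TX DMA threshold after an error
 * @priv: driver private structure
 * @chan: channel index
 * Description: unless Store-And-Forward mode is already in use, bump the
 * threshold (tc) in steps of 64, up to 256, and reprogram the channel
 * operation mode accordingly.
 */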
2518 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2519 {
2520         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2521                 tc += 64;
2522
2523                 if (priv->plat->force_thresh_dma_mode)
2524                         stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2525                 else
2526                         stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2527                                                       chan);
2528
2529                 priv->xstats.threshold = tc;
2530         }
2531 }
2532
2533 /**
2534  * stmmac_tx_clean - to manage the transmission completion
2535  * @priv: driver private structure
2536  * @budget: napi budget limiting this function's packet handling
2537  * @queue: TX queue index
2538  * Description: it reclaims the transmit resources after transmission completes.
2539  */
2540 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2541 {
2542         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2543         unsigned int bytes_compl = 0, pkts_compl = 0;
2544         unsigned int entry, xmits = 0, count = 0;
2545
2546         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2547
2548         priv->xstats.tx_clean++;
2549
2550         tx_q->xsk_frames_done = 0;
2551
2552         entry = tx_q->dirty_tx;
2553
2554         /* Try to clean all the completed TX frames in one shot */
2555         while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2556                 struct xdp_frame *xdpf;
2557                 struct sk_buff *skb;
2558                 struct dma_desc *p;
2559                 int status;
2560
2561                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2562                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2563                         xdpf = tx_q->xdpf[entry];
2564                         skb = NULL;
2565                 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2566                         xdpf = NULL;
2567                         skb = tx_q->tx_skbuff[entry];
2568                 } else {
2569                         xdpf = NULL;
2570                         skb = NULL;
2571                 }
2572
2573                 if (priv->extend_desc)
2574                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2575                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2576                         p = &tx_q->dma_entx[entry].basic;
2577                 else
2578                         p = tx_q->dma_tx + entry;
2579
2580                 status = stmmac_tx_status(priv, &priv->dev->stats,
2581                                 &priv->xstats, p, priv->ioaddr);
2582                 /* Check if the descriptor is owned by the DMA */
2583                 if (unlikely(status & tx_dma_own))
2584                         break;
2585
2586                 count++;
2587
2588                 /* Make sure descriptor fields are read after reading
2589                  * the own bit.
2590                  */
2591                 dma_rmb();
2592
2593                 /* Just consider the last segment and ...*/
2594                 if (likely(!(status & tx_not_ls))) {
2595                         /* ... verify the status error condition */
2596                         if (unlikely(status & tx_err)) {
2597                                 priv->dev->stats.tx_errors++;
2598                                 if (unlikely(status & tx_err_bump_tc))
2599                                         stmmac_bump_dma_threshold(priv, queue);
2600                         } else {
2601                                 priv->dev->stats.tx_packets++;
2602                                 priv->xstats.tx_pkt_n++;
2603                                 priv->xstats.txq_stats[queue].tx_pkt_n++;
2604                         }
2605                         if (skb)
2606                                 stmmac_get_tx_hwtstamp(priv, p, skb);
2607                 }
2608
2609                 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2610                            tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2611                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2612                                 dma_unmap_page(priv->device,
2613                                                tx_q->tx_skbuff_dma[entry].buf,
2614                                                tx_q->tx_skbuff_dma[entry].len,
2615                                                DMA_TO_DEVICE);
2616                         else
2617                                 dma_unmap_single(priv->device,
2618                                                  tx_q->tx_skbuff_dma[entry].buf,
2619                                                  tx_q->tx_skbuff_dma[entry].len,
2620                                                  DMA_TO_DEVICE);
2621                         tx_q->tx_skbuff_dma[entry].buf = 0;
2622                         tx_q->tx_skbuff_dma[entry].len = 0;
2623                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2624                 }
2625
2626                 stmmac_clean_desc3(priv, tx_q, p);
2627
2628                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2629                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2630
2631                 if (xdpf &&
2632                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2633                         xdp_return_frame_rx_napi(xdpf);
2634                         tx_q->xdpf[entry] = NULL;
2635                 }
2636
2637                 if (xdpf &&
2638                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2639                         xdp_return_frame(xdpf);
2640                         tx_q->xdpf[entry] = NULL;
2641                 }
2642
2643                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2644                         tx_q->xsk_frames_done++;
2645
2646                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2647                         if (likely(skb)) {
2648                                 pkts_compl++;
2649                                 bytes_compl += skb->len;
2650                                 dev_consume_skb_any(skb);
2651                                 tx_q->tx_skbuff[entry] = NULL;
2652                         }
2653                 }
2654
2655                 stmmac_release_tx_desc(priv, p, priv->mode);
2656
2657                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2658         }
2659         tx_q->dirty_tx = entry;
2660
2661         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2662                                   pkts_compl, bytes_compl);
2663
2664         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2665                                                                 queue))) &&
2666             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2667
2668                 netif_dbg(priv, tx_done, priv->dev,
2669                           "%s: restart transmit\n", __func__);
2670                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2671         }
2672
2673         if (tx_q->xsk_pool) {
2674                 bool work_done;
2675
2676                 if (tx_q->xsk_frames_done)
2677                         xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2678
2679                 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2680                         xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2681
2682                 /* For XSK TX, we try to send as many as possible.
2683                  * If XSK work done (XSK TX desc empty and budget still
2684                  * available), return "budget - 1" to reenable TX IRQ.
2685                  * Else, return "budget" to make NAPI continue polling.
2686                  */
2687                 work_done = stmmac_xdp_xmit_zc(priv, queue,
2688                                                STMMAC_XSK_TX_BUDGET_MAX);
2689                 if (work_done)
2690                         xmits = budget - 1;
2691                 else
2692                         xmits = budget;
2693         }
2694
2695         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2696             priv->eee_sw_timer_en) {
2697                 if (stmmac_enable_eee_mode(priv))
2698                         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2699         }
2700
2701         /* We still have pending packets, let's call for a new scheduling */
2702         if (tx_q->dirty_tx != tx_q->cur_tx)
2703                 hrtimer_start(&tx_q->txtimer,
2704                               STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2705                               HRTIMER_MODE_REL);
2706
2707         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2708
2709         /* Combine decisions from TX clean and XSK TX */
2710         return max(count, xmits);
2711 }
2712
2713 /**
2714  * stmmac_tx_err - to manage the tx error
2715  * @priv: driver private structure
2716  * @chan: channel index
2717  * Description: it cleans the descriptors and restarts the transmission
2718  * in case of transmission errors.
2719  */
2720 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2721 {
2722         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2723
2724         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2725
2726         stmmac_stop_tx_dma(priv, chan);
2727         dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2728         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2729         stmmac_reset_tx_queue(priv, chan);
2730         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2731                             tx_q->dma_tx_phy, chan);
2732         stmmac_start_tx_dma(priv, chan);
2733
2734         priv->dev->stats.tx_errors++;
2735         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2736 }
2737
2738 /**
2739  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2740  *  @priv: driver private structure
2741  *  @txmode: TX operating mode
2742  *  @rxmode: RX operating mode
2743  *  @chan: channel index
2744  *  Description: it is used for configuring the DMA operation mode at
2745  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2746  *  mode.
2747  */
2748 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2749                                           u32 rxmode, u32 chan)
2750 {
2751         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2752         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2753         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2754         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2755         int rxfifosz = priv->plat->rx_fifo_size;
2756         int txfifosz = priv->plat->tx_fifo_size;
2757
2758         if (rxfifosz == 0)
2759                 rxfifosz = priv->dma_cap.rx_fifo_size;
2760         if (txfifosz == 0)
2761                 txfifosz = priv->dma_cap.tx_fifo_size;
2762
2763         /* Adjust for real per queue fifo size */
2764         rxfifosz /= rx_channels_count;
2765         txfifosz /= tx_channels_count;
2766
2767         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2768         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2769 }
2770
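/* Check the safety feature interrupt status (priv->dma_cap.asp) and trigger
 * the global error handling if an error is reported.
 */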
2771 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2772 {
2773         int ret;
2774
2775         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2776                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2777         if (ret && (ret != -EINVAL)) {
2778                 stmmac_global_err(priv);
2779                 return true;
2780         }
2781
2782         return false;
2783 }
2784
2785 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2786 {
2787         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2788                                                  &priv->xstats, chan, dir);
2789         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2790         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2791         struct stmmac_channel *ch = &priv->channel[chan];
2792         struct napi_struct *rx_napi;
2793         struct napi_struct *tx_napi;
2794         unsigned long flags;
2795
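             /* Queues with an XSK (AF_XDP) pool attached are serviced by the
              * combined rxtx NAPI instead of the per-direction NAPI.
              */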
2796         rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2797         tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2798
2799         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2800                 if (napi_schedule_prep(rx_napi)) {
2801                         spin_lock_irqsave(&ch->lock, flags);
2802                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2803                         spin_unlock_irqrestore(&ch->lock, flags);
2804                         __napi_schedule(rx_napi);
2805                 }
2806         }
2807
2808         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2809                 if (napi_schedule_prep(tx_napi)) {
2810                         spin_lock_irqsave(&ch->lock, flags);
2811                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2812                         spin_unlock_irqrestore(&ch->lock, flags);
2813                         __napi_schedule(tx_napi);
2814                 }
2815         }
2816
2817         return status;
2818 }
2819
2820 /**
2821  * stmmac_dma_interrupt - DMA ISR
2822  * @priv: driver private structure
2823  * Description: this is the DMA ISR. It is called by the main ISR.
2824  * It calls the dwmac DMA routine and schedules the poll method in case
2825  * some work can be done.
2826  */
2827 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2828 {
2829         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2830         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2831         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2832                                 tx_channel_count : rx_channel_count;
2833         u32 chan;
2834         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2835
2836         /* Make sure we never check beyond our status buffer. */
2837         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2838                 channels_to_check = ARRAY_SIZE(status);
2839
2840         for (chan = 0; chan < channels_to_check; chan++)
2841                 status[chan] = stmmac_napi_check(priv, chan,
2842                                                  DMA_DIR_RXTX);
2843
2844         for (chan = 0; chan < tx_channel_count; chan++) {
2845                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2846                         /* Try to bump up the dma threshold on this failure */
2847                         stmmac_bump_dma_threshold(priv, chan);
2848                 } else if (unlikely(status[chan] == tx_hard_error)) {
2849                         stmmac_tx_err(priv, chan);
2850                 }
2851         }
2852 }
2853
2854 /**
2855  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2856  * @priv: driver private structure
2857  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2858  */
2859 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2860 {
2861         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2862                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2863
2864         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2865
2866         if (priv->dma_cap.rmon) {
2867                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2868                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2869         } else
2870                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2871 }
2872
2873 /**
2874  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2875  * @priv: driver private structure
2876  * Description:
2877  *  new GMAC chip generations have a new register to indicate the
2878  *  presence of the optional features/functions.
2879  *  This can also be used to override the value passed through the
2880  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2881  */
2882 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2883 {
2884         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2885 }
2886
2887 /**
2888  * stmmac_check_ether_addr - check if the MAC addr is valid
2889  * @priv: driver private structure
2890  * Description:
2891  * it verifies that the MAC address is valid; if it is not, a random
2892  * MAC address is generated
2893  */
2894 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2895 {
2896         u8 addr[ETH_ALEN];
2897
2898         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2899                 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2900                 if (is_valid_ether_addr(addr))
2901                         eth_hw_addr_set(priv->dev, addr);
2902                 else
2903                         eth_hw_addr_random(priv->dev);
2904                 dev_info(priv->device, "device MAC address %pM\n",
2905                          priv->dev->dev_addr);
2906         }
2907 }
2908
2909 /**
2910  * stmmac_init_dma_engine - DMA init.
2911  * @priv: driver private structure
2912  * Description:
2913  * It initializes the DMA invoking the specific MAC/GMAC callback.
2914  * Some DMA parameters can be passed from the platform;
2915  * if they are not passed, a default is used for the MAC or GMAC.
2916  */
2917 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2918 {
2919         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2920         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2921         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2922         struct stmmac_rx_queue *rx_q;
2923         struct stmmac_tx_queue *tx_q;
2924         u32 chan = 0;
2925         int atds = 0;
2926         int ret = 0;
2927
2928         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2929                 dev_err(priv->device, "Invalid DMA configuration\n");
2930                 return -EINVAL;
2931         }
2932
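             /* ATDS (alternate descriptor size) is only needed when extended
              * descriptors are used in ring mode.
              */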
2933         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2934                 atds = 1;
2935
2936         ret = stmmac_reset(priv, priv->ioaddr);
2937         if (ret) {
2938                 dev_err(priv->device, "Failed to reset the dma\n");
2939                 return ret;
2940         }
2941
2942         /* DMA Configuration */
2943         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2944
2945         if (priv->plat->axi)
2946                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2947
2948         /* DMA CSR Channel configuration */
2949         for (chan = 0; chan < dma_csr_ch; chan++) {
2950                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2951                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2952         }
2953
2954         /* DMA RX Channel Configuration */
2955         for (chan = 0; chan < rx_channels_count; chan++) {
2956                 rx_q = &priv->dma_conf.rx_queue[chan];
2957
2958                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2959                                     rx_q->dma_rx_phy, chan);
2960
2961                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2962                                      (rx_q->buf_alloc_num *
2963                                       sizeof(struct dma_desc));
2964                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2965                                        rx_q->rx_tail_addr, chan);
2966         }
2967
2968         /* DMA TX Channel Configuration */
2969         for (chan = 0; chan < tx_channels_count; chan++) {
2970                 tx_q = &priv->dma_conf.tx_queue[chan];
2971
2972                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2973                                     tx_q->dma_tx_phy, chan);
2974
2975                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2976                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2977                                        tx_q->tx_tail_addr, chan);
2978         }
2979
2980         return ret;
2981 }
2982
2983 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2984 {
2985         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2986
2987         hrtimer_start(&tx_q->txtimer,
2988                       STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2989                       HRTIMER_MODE_REL);
2990 }
2991
2992 /**
2993  * stmmac_tx_timer - mitigation sw timer for tx.
2994  * @t: pointer to the per-queue TX coalescing hrtimer
2995  * Description:
2996  * This is the timer handler to directly invoke the stmmac_tx_clean.
2997  */
2998 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2999 {
3000         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3001         struct stmmac_priv *priv = tx_q->priv_data;
3002         struct stmmac_channel *ch;
3003         struct napi_struct *napi;
3004
3005         ch = &priv->channel[tx_q->queue_index];
3006         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3007
3008         if (likely(napi_schedule_prep(napi))) {
3009                 unsigned long flags;
3010
3011                 spin_lock_irqsave(&ch->lock, flags);
3012                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3013                 spin_unlock_irqrestore(&ch->lock, flags);
3014                 __napi_schedule(napi);
3015         }
3016
3017         return HRTIMER_NORESTART;
3018 }
3019
3020 /**
3021  * stmmac_init_coalesce - init mitigation options.
3022  * @priv: driver private structure
3023  * Description:
3024  * This inits the coalesce parameters: i.e. timer rate,
3025  * timer handler and default threshold used for enabling the
3026  * interrupt on completion bit.
3027  */
3028 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3029 {
3030         u32 tx_channel_count = priv->plat->tx_queues_to_use;
3031         u32 rx_channel_count = priv->plat->rx_queues_to_use;
3032         u32 chan;
3033
3034         for (chan = 0; chan < tx_channel_count; chan++) {
3035                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3036
3037                 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3038                 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3039
3040                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3041                 tx_q->txtimer.function = stmmac_tx_timer;
3042         }
3043
3044         for (chan = 0; chan < rx_channel_count; chan++)
3045                 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3046 }
3047
3048 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3049 {
3050         u32 rx_channels_count = priv->plat->rx_queues_to_use;
3051         u32 tx_channels_count = priv->plat->tx_queues_to_use;
3052         u32 chan;
3053
3054         /* set TX ring length */
3055         for (chan = 0; chan < tx_channels_count; chan++)
3056                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3057                                        (priv->dma_conf.dma_tx_size - 1), chan);
3058
3059         /* set RX ring length */
3060         for (chan = 0; chan < rx_channels_count; chan++)
3061                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3062                                        (priv->dma_conf.dma_rx_size - 1), chan);
3063 }
3064
3065 /**
3066  *  stmmac_set_tx_queue_weight - Set TX queue weight
3067  *  @priv: driver private structure
3068  *  Description: It is used for setting the TX queue weights
3069  */
3070 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3071 {
3072         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3073         u32 weight;
3074         u32 queue;
3075
3076         for (queue = 0; queue < tx_queues_count; queue++) {
3077                 weight = priv->plat->tx_queues_cfg[queue].weight;
3078                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3079         }
3080 }
3081
3082 /**
3083  *  stmmac_configure_cbs - Configure CBS in TX queue
3084  *  @priv: driver private structure
3085  *  Description: It is used for configuring CBS in AVB TX queues
3086  */
3087 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3088 {
3089         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3090         u32 mode_to_use;
3091         u32 queue;
3092
3093         /* queue 0 is reserved for legacy traffic */
3094         for (queue = 1; queue < tx_queues_count; queue++) {
3095                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3096                 if (mode_to_use == MTL_QUEUE_DCB)
3097                         continue;
3098
3099                 stmmac_config_cbs(priv, priv->hw,
3100                                 priv->plat->tx_queues_cfg[queue].send_slope,
3101                                 priv->plat->tx_queues_cfg[queue].idle_slope,
3102                                 priv->plat->tx_queues_cfg[queue].high_credit,
3103                                 priv->plat->tx_queues_cfg[queue].low_credit,
3104                                 queue);
3105         }
3106 }
3107
3108 /**
3109  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3110  *  @priv: driver private structure
3111  *  Description: It is used for mapping RX queues to RX dma channels
3112  */
3113 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3114 {
3115         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3116         u32 queue;
3117         u32 chan;
3118
3119         for (queue = 0; queue < rx_queues_count; queue++) {
3120                 chan = priv->plat->rx_queues_cfg[queue].chan;
3121                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3122         }
3123 }
3124
3125 /**
3126  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3127  *  @priv: driver private structure
3128  *  Description: It is used for configuring the RX Queue Priority
3129  */
3130 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3131 {
3132         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3133         u32 queue;
3134         u32 prio;
3135
3136         for (queue = 0; queue < rx_queues_count; queue++) {
3137                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3138                         continue;
3139
3140                 prio = priv->plat->rx_queues_cfg[queue].prio;
3141                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3142         }
3143 }
3144
3145 /**
3146  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3147  *  @priv: driver private structure
3148  *  Description: It is used for configuring the TX Queue Priority
3149  */
3150 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3151 {
3152         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3153         u32 queue;
3154         u32 prio;
3155
3156         for (queue = 0; queue < tx_queues_count; queue++) {
3157                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3158                         continue;
3159
3160                 prio = priv->plat->tx_queues_cfg[queue].prio;
3161                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3162         }
3163 }
3164
3165 /**
3166  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3167  *  @priv: driver private structure
3168  *  Description: It is used for configuring the RX queue routing
3169  */
3170 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3171 {
3172         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3173         u32 queue;
3174         u8 packet;
3175
3176         for (queue = 0; queue < rx_queues_count; queue++) {
3177                 /* no specific packet type routing specified for the queue */
3178                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3179                         continue;
3180
3181                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3182                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3183         }
3184 }
3185
3186 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3187 {
3188         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3189                 priv->rss.enable = false;
3190                 return;
3191         }
3192
3193         if (priv->dev->features & NETIF_F_RXHASH)
3194                 priv->rss.enable = true;
3195         else
3196                 priv->rss.enable = false;
3197
3198         stmmac_rss_configure(priv, priv->hw, &priv->rss,
3199                              priv->plat->rx_queues_to_use);
3200 }
3201
3202 /**
3203  *  stmmac_mtl_configuration - Configure MTL
3204  *  @priv: driver private structure
3205  *  Description: It is used for configuring MTL
3206  */
3207 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3208 {
3209         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3210         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3211
3212         if (tx_queues_count > 1)
3213                 stmmac_set_tx_queue_weight(priv);
3214
3215         /* Configure MTL RX algorithms */
3216         if (rx_queues_count > 1)
3217                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3218                                 priv->plat->rx_sched_algorithm);
3219
3220         /* Configure MTL TX algorithms */
3221         if (tx_queues_count > 1)
3222                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3223                                 priv->plat->tx_sched_algorithm);
3224
3225         /* Configure CBS in AVB TX queues */
3226         if (tx_queues_count > 1)
3227                 stmmac_configure_cbs(priv);
3228
3229         /* Map RX MTL to DMA channels */
3230         stmmac_rx_queue_dma_chan_map(priv);
3231
3232         /* Enable MAC RX Queues */
3233         stmmac_mac_enable_rx_queues(priv);
3234
3235         /* Set RX priorities */
3236         if (rx_queues_count > 1)
3237                 stmmac_mac_config_rx_queues_prio(priv);
3238
3239         /* Set TX priorities */
3240         if (tx_queues_count > 1)
3241                 stmmac_mac_config_tx_queues_prio(priv);
3242
3243         /* Set RX routing */
3244         if (rx_queues_count > 1)
3245                 stmmac_mac_config_rx_queues_routing(priv);
3246
3247         /* Receive Side Scaling */
3248         if (rx_queues_count > 1)
3249                 stmmac_mac_config_rss(priv);
3250 }
3251
3252 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3253 {
3254         if (priv->dma_cap.asp) {
3255                 netdev_info(priv->dev, "Enabling Safety Features\n");
3256                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3257                                           priv->plat->safety_feat_cfg);
3258         } else {
3259                 netdev_info(priv->dev, "No Safety Features support found\n");
3260         }
3261 }
3262
3263 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3264 {
3265         char *name;
3266
3267         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3268         clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3269
3270         name = priv->wq_name;
3271         sprintf(name, "%s-fpe", priv->dev->name);
3272
3273         priv->fpe_wq = create_singlethread_workqueue(name);
3274         if (!priv->fpe_wq) {
3275                 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3276
3277                 return -ENOMEM;
3278         }
3279         netdev_info(priv->dev, "FPE workqueue start");
3280
3281         return 0;
3282 }
3283
3284 /**
3285  * stmmac_hw_setup - setup mac in a usable state.
3286  *  @dev : pointer to the device structure.
3287  *  @ptp_register: register PTP if set
3288  *  Description:
3289  *  this is the main function to set up the HW in a usable state: the
3290  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3291  *  checksum features, timers) and the DMA is ready to start receiving
3292  *  and transmitting.
3293  *  Return value:
3294  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3295  *  file on failure.
3296  */
3297 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3298 {
3299         struct stmmac_priv *priv = netdev_priv(dev);
3300         u32 rx_cnt = priv->plat->rx_queues_to_use;
3301         u32 tx_cnt = priv->plat->tx_queues_to_use;
3302         bool sph_en;
3303         u32 chan;
3304         int ret;
3305
3306         /* DMA initialization and SW reset */
3307         ret = stmmac_init_dma_engine(priv);
3308         if (ret < 0) {
3309                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3310                            __func__);
3311                 return ret;
3312         }
3313
3314         /* Copy the MAC addr into the HW  */
3315         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3316
3317         /* PS and related bits will be programmed according to the speed */
3318         if (priv->hw->pcs) {
3319                 int speed = priv->plat->mac_port_sel_speed;
3320
3321                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3322                     (speed == SPEED_1000)) {
3323                         priv->hw->ps = speed;
3324                 } else {
3325                         dev_warn(priv->device, "invalid port speed\n");
3326                         priv->hw->ps = 0;
3327                 }
3328         }
3329
3330         /* Initialize the MAC Core */
3331         stmmac_core_init(priv, priv->hw, dev);
3332
3333         /* Initialize MTL */
3334         stmmac_mtl_configuration(priv);
3335
3336         /* Initialize Safety Features */
3337         stmmac_safety_feat_configuration(priv);
3338
3339         ret = stmmac_rx_ipc(priv, priv->hw);
3340         if (!ret) {
3341                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3342                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3343                 priv->hw->rx_csum = 0;
3344         }
3345
3346         /* Enable the MAC Rx/Tx */
3347         stmmac_mac_set(priv, priv->ioaddr, true);
3348
3349         /* Set the HW DMA mode and the COE */
3350         stmmac_dma_operation_mode(priv);
3351
3352         stmmac_mmc_setup(priv);
3353
3354         if (ptp_register) {
3355                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3356                 if (ret < 0)
3357                         netdev_warn(priv->dev,
3358                                     "failed to enable PTP reference clock: %pe\n",
3359                                     ERR_PTR(ret));
3360         }
3361
3362         ret = stmmac_init_ptp(priv);
3363         if (ret == -EOPNOTSUPP)
3364                 netdev_info(priv->dev, "PTP not supported by HW\n");
3365         else if (ret)
3366                 netdev_warn(priv->dev, "PTP init failed\n");
3367         else if (ptp_register)
3368                 stmmac_ptp_register(priv);
3369
3370         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3371
3372         /* Convert the timer from msec to usec */
3373         if (!priv->tx_lpi_timer)
3374                 priv->tx_lpi_timer = eee_timer * 1000;
3375
3376         if (priv->use_riwt) {
3377                 u32 queue;
3378
3379                 for (queue = 0; queue < rx_cnt; queue++) {
3380                         if (!priv->rx_riwt[queue])
3381                                 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3382
3383                         stmmac_rx_watchdog(priv, priv->ioaddr,
3384                                            priv->rx_riwt[queue], queue);
3385                 }
3386         }
3387
3388         if (priv->hw->pcs)
3389                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3390
3391         /* set TX and RX rings length */
3392         stmmac_set_rings_length(priv);
3393
3394         /* Enable TSO */
3395         if (priv->tso) {
3396                 for (chan = 0; chan < tx_cnt; chan++) {
3397                         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3398
3399                         /* TSO and TBS cannot co-exist */
3400                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3401                                 continue;
3402
3403                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3404                 }
3405         }
3406
3407         /* Enable Split Header */
3408         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3409         for (chan = 0; chan < rx_cnt; chan++)
3410                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3411
3413         /* VLAN Tag Insertion */
3414         if (priv->dma_cap.vlins)
3415                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3416
3417         /* TBS */
3418         for (chan = 0; chan < tx_cnt; chan++) {
3419                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3420                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3421
3422                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3423         }
3424
3425         /* Configure real RX and TX queues */
3426         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3427         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3428
3429         /* Start the ball rolling... */
3430         stmmac_start_all_dma(priv);
3431
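             /* If Frame Preemption is supported, start the FPE workqueue and,
              * when enabled by the platform, start the verification handshake.
              */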
3432         if (priv->dma_cap.fpesel) {
3433                 stmmac_fpe_start_wq(priv);
3434
3435                 if (priv->plat->fpe_cfg->enable)
3436                         stmmac_fpe_handshake(priv, true);
3437         }
3438
3439         return 0;
3440 }
3441
3442 static void stmmac_hw_teardown(struct net_device *dev)
3443 {
3444         struct stmmac_priv *priv = netdev_priv(dev);
3445
3446         clk_disable_unprepare(priv->plat->clk_ptp_ref);
3447 }
3448
3449 static void stmmac_free_irq(struct net_device *dev,
3450                             enum request_irq_err irq_err, int irq_idx)
3451 {
3452         struct stmmac_priv *priv = netdev_priv(dev);
3453         int j;
3454
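             /* Free IRQs in reverse order of the request path: each case
              * falls through so that everything requested before the failing
              * step gets released.
              */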
3455         switch (irq_err) {
3456         case REQ_IRQ_ERR_ALL:
3457                 irq_idx = priv->plat->tx_queues_to_use;
3458                 fallthrough;
3459         case REQ_IRQ_ERR_TX:
3460                 for (j = irq_idx - 1; j >= 0; j--) {
3461                         if (priv->tx_irq[j] > 0) {
3462                                 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3463                                 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3464                         }
3465                 }
3466                 irq_idx = priv->plat->rx_queues_to_use;
3467                 fallthrough;
3468         case REQ_IRQ_ERR_RX:
3469                 for (j = irq_idx - 1; j >= 0; j--) {
3470                         if (priv->rx_irq[j] > 0) {
3471                                 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3472                                 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3473                         }
3474                 }
3475
3476                 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3477                         free_irq(priv->sfty_ue_irq, dev);
3478                 fallthrough;
3479         case REQ_IRQ_ERR_SFTY_UE:
3480                 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3481                         free_irq(priv->sfty_ce_irq, dev);
3482                 fallthrough;
3483         case REQ_IRQ_ERR_SFTY_CE:
3484                 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3485                         free_irq(priv->lpi_irq, dev);
3486                 fallthrough;
3487         case REQ_IRQ_ERR_LPI:
3488                 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3489                         free_irq(priv->wol_irq, dev);
3490                 fallthrough;
3491         case REQ_IRQ_ERR_WOL:
3492                 free_irq(dev->irq, dev);
3493                 fallthrough;
3494         case REQ_IRQ_ERR_MAC:
3495         case REQ_IRQ_ERR_NO:
3496                 /* If the MAC IRQ request failed, there are no more IRQs to free */
3497                 break;
3498         }
3499 }
3500
3501 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3502 {
3503         struct stmmac_priv *priv = netdev_priv(dev);
3504         enum request_irq_err irq_err;
3505         cpumask_t cpu_mask;
3506         int irq_idx = 0;
3507         char *int_name;
3508         int ret;
3509         int i;
3510
3511         /* For common interrupt */
3512         int_name = priv->int_name_mac;
3513         sprintf(int_name, "%s:%s", dev->name, "mac");
3514         ret = request_irq(dev->irq, stmmac_mac_interrupt,
3515                           0, int_name, dev);
3516         if (unlikely(ret < 0)) {
3517                 netdev_err(priv->dev,
3518                            "%s: alloc mac MSI %d (error: %d)\n",
3519                            __func__, dev->irq, ret);
3520                 irq_err = REQ_IRQ_ERR_MAC;
3521                 goto irq_error;
3522         }
3523
3524         /* Request the Wake IRQ in case another line
3525          * is used for WoL
3526          */
3527         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3528                 int_name = priv->int_name_wol;
3529                 sprintf(int_name, "%s:%s", dev->name, "wol");
3530                 ret = request_irq(priv->wol_irq,
3531                                   stmmac_mac_interrupt,
3532                                   0, int_name, dev);
3533                 if (unlikely(ret < 0)) {
3534                         netdev_err(priv->dev,
3535                                    "%s: alloc wol MSI %d (error: %d)\n",
3536                                    __func__, priv->wol_irq, ret);
3537                         irq_err = REQ_IRQ_ERR_WOL;
3538                         goto irq_error;
3539                 }
3540         }
3541
3542         /* Request the LPI IRQ in case another line
3543          * is used for LPI
3544          */
3545         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3546                 int_name = priv->int_name_lpi;
3547                 sprintf(int_name, "%s:%s", dev->name, "lpi");
3548                 ret = request_irq(priv->lpi_irq,
3549                                   stmmac_mac_interrupt,
3550                                   0, int_name, dev);
3551                 if (unlikely(ret < 0)) {
3552                         netdev_err(priv->dev,
3553                                    "%s: alloc lpi MSI %d (error: %d)\n",
3554                                    __func__, priv->lpi_irq, ret);
3555                         irq_err = REQ_IRQ_ERR_LPI;
3556                         goto irq_error;
3557                 }
3558         }
3559
3560         /* Request the Safety Feature Correctable Error line in
3561          * case another line is used
3562          */
3563         if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3564                 int_name = priv->int_name_sfty_ce;
3565                 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3566                 ret = request_irq(priv->sfty_ce_irq,
3567                                   stmmac_safety_interrupt,
3568                                   0, int_name, dev);
3569                 if (unlikely(ret < 0)) {
3570                         netdev_err(priv->dev,
3571                                    "%s: alloc sfty ce MSI %d (error: %d)\n",
3572                                    __func__, priv->sfty_ce_irq, ret);
3573                         irq_err = REQ_IRQ_ERR_SFTY_CE;
3574                         goto irq_error;
3575                 }
3576         }
3577
3578         /* Request the Safety Feature Uncorrectable Error line in
3579          * case another line is used
3580          */
3581         if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3582                 int_name = priv->int_name_sfty_ue;
3583                 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3584                 ret = request_irq(priv->sfty_ue_irq,
3585                                   stmmac_safety_interrupt,
3586                                   0, int_name, dev);
3587                 if (unlikely(ret < 0)) {
3588                         netdev_err(priv->dev,
3589                                    "%s: alloc sfty ue MSI %d (error: %d)\n",
3590                                    __func__, priv->sfty_ue_irq, ret);
3591                         irq_err = REQ_IRQ_ERR_SFTY_UE;
3592                         goto irq_error;
3593                 }
3594         }
3595
3596         /* Request Rx MSI irq */
3597         for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3598                 if (i >= MTL_MAX_RX_QUEUES)
3599                         break;
3600                 if (priv->rx_irq[i] == 0)
3601                         continue;
3602
3603                 int_name = priv->int_name_rx_irq[i];
3604                 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3605                 ret = request_irq(priv->rx_irq[i],
3606                                   stmmac_msi_intr_rx,
3607                                   0, int_name, &priv->dma_conf.rx_queue[i]);
3608                 if (unlikely(ret < 0)) {
3609                         netdev_err(priv->dev,
3610                                    "%s: alloc rx-%d  MSI %d (error: %d)\n",
3611                                    __func__, i, priv->rx_irq[i], ret);
3612                         irq_err = REQ_IRQ_ERR_RX;
3613                         irq_idx = i;
3614                         goto irq_error;
3615                 }
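                     /* Spread RX queue interrupts across online CPUs via an
                      * affinity hint (round-robin on the queue index).
                      */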
3616                 cpumask_clear(&cpu_mask);
3617                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3618                 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3619         }
3620
3621         /* Request Tx MSI irq */
3622         for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3623                 if (i >= MTL_MAX_TX_QUEUES)
3624                         break;
3625                 if (priv->tx_irq[i] == 0)
3626                         continue;
3627
3628                 int_name = priv->int_name_tx_irq[i];
3629                 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3630                 ret = request_irq(priv->tx_irq[i],
3631                                   stmmac_msi_intr_tx,
3632                                   0, int_name, &priv->dma_conf.tx_queue[i]);
3633                 if (unlikely(ret < 0)) {
3634                         netdev_err(priv->dev,
3635                                    "%s: alloc tx-%d  MSI %d (error: %d)\n",
3636                                    __func__, i, priv->tx_irq[i], ret);
3637                         irq_err = REQ_IRQ_ERR_TX;
3638                         irq_idx = i;
3639                         goto irq_error;
3640                 }
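                     /* Same round-robin affinity hint for the TX queue IRQs */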
3641                 cpumask_clear(&cpu_mask);
3642                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3643                 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3644         }
3645
3646         return 0;
3647
3648 irq_error:
3649         stmmac_free_irq(dev, irq_err, irq_idx);
3650         return ret;
3651 }
3652
3653 static int stmmac_request_irq_single(struct net_device *dev)
3654 {
3655         struct stmmac_priv *priv = netdev_priv(dev);
3656         enum request_irq_err irq_err;
3657         int ret;
3658
3659         ret = request_irq(dev->irq, stmmac_interrupt,
3660                           IRQF_SHARED, dev->name, dev);
3661         if (unlikely(ret < 0)) {
3662                 netdev_err(priv->dev,
3663                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3664                            __func__, dev->irq, ret);
3665                 irq_err = REQ_IRQ_ERR_MAC;
3666                 goto irq_error;
3667         }
3668
3669         /* Request the Wake IRQ in case another line
3670          * is used for WoL
3671          */
3672         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3673                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3674                                   IRQF_SHARED, dev->name, dev);
3675                 if (unlikely(ret < 0)) {
3676                         netdev_err(priv->dev,
3677                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3678                                    __func__, priv->wol_irq, ret);
3679                         irq_err = REQ_IRQ_ERR_WOL;
3680                         goto irq_error;
3681                 }
3682         }
3683
3684         /* Request the LPI IRQ in case another line is used for LPI */
3685         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3686                 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3687                                   IRQF_SHARED, dev->name, dev);
3688                 if (unlikely(ret < 0)) {
3689                         netdev_err(priv->dev,
3690                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3691                                    __func__, priv->lpi_irq, ret);
3692                         irq_err = REQ_IRQ_ERR_LPI;
3693                         goto irq_error;
3694                 }
3695         }
3696
3697         return 0;
3698
3699 irq_error:
3700         stmmac_free_irq(dev, irq_err, 0);
3701         return ret;
3702 }
3703
3704 static int stmmac_request_irq(struct net_device *dev)
3705 {
3706         struct stmmac_priv *priv = netdev_priv(dev);
3707         int ret;
3708
3709         /* Request the IRQ lines */
3710         if (priv->plat->multi_msi_en)
3711                 ret = stmmac_request_irq_multi_msi(dev);
3712         else
3713                 ret = stmmac_request_irq_single(dev);
3714
3715         return ret;
3716 }
3717
3718 /**
3719  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3720  *  @priv: driver private structure
3721  *  @mtu: MTU to set up the DMA queues and buffers with
3722  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3723  *  Allocate the Tx/Rx DMA queues and init them.
3724  *  Return value:
3725  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3726  */
3727 static struct stmmac_dma_conf *
3728 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3729 {
3730         struct stmmac_dma_conf *dma_conf;
3731         int chan, bfsize, ret;
3732
3733         dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3734         if (!dma_conf) {
3735                 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3736                            __func__);
3737                 return ERR_PTR(-ENOMEM);
3738         }
3739
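             /* Use a 16KiB buffer when the core/MTU calls for it; otherwise
              * derive the buffer size from the MTU.
              */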
3740         bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3741         if (bfsize < 0)
3742                 bfsize = 0;
3743
3744         if (bfsize < BUF_SIZE_16KiB)
3745                 bfsize = stmmac_set_bfsize(mtu, 0);
3746
3747         dma_conf->dma_buf_sz = bfsize;
3748         /* Choose the tx/rx size from the one already defined in the
3749          * priv struct (if defined).
3750          */
3751         dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3752         dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3753
3754         if (!dma_conf->dma_tx_size)
3755                 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3756         if (!dma_conf->dma_rx_size)
3757                 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3758
3759         /* Earlier check for TBS */
3760         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3761                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3762                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3763
3764                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3765                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3766         }
3767
3768         ret = alloc_dma_desc_resources(priv, dma_conf);
3769         if (ret < 0) {
3770                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3771                            __func__);
3772                 goto alloc_error;
3773         }
3774
3775         ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3776         if (ret < 0) {
3777                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3778                            __func__);
3779                 goto init_error;
3780         }
3781
3782         return dma_conf;
3783
3784 init_error:
3785         free_dma_desc_resources(priv, dma_conf);
3786 alloc_error:
3787         kfree(dma_conf);
3788         return ERR_PTR(ret);
3789 }
3790
3791 /**
3792  *  __stmmac_open - open entry point of the driver
3793  *  @dev : pointer to the device structure.
3794  *  @dma_conf: structure holding the DMA configuration data
3795  *  Description:
3796  *  This function is the open entry point of the driver.
3797  *  Return value:
3798  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3799  *  file on failure.
3800  */
3801 static int __stmmac_open(struct net_device *dev,
3802                          struct stmmac_dma_conf *dma_conf)
3803 {
3804         struct stmmac_priv *priv = netdev_priv(dev);
3805         int mode = priv->plat->phy_interface;
3806         u32 chan;
3807         int ret;
3808
3809         ret = pm_runtime_resume_and_get(priv->device);
3810         if (ret < 0)
3811                 return ret;
3812
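             /* Attach a PHY unless the link is managed by a TBI/RTBI PCS or
              * by an XPCS using Clause 73 auto-negotiation.
              */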
3813         if (priv->hw->pcs != STMMAC_PCS_TBI &&
3814             priv->hw->pcs != STMMAC_PCS_RTBI &&
3815             (!priv->hw->xpcs ||
3816              xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3817                 ret = stmmac_init_phy(dev);
3818                 if (ret) {
3819                         netdev_err(priv->dev,
3820                                    "%s: Cannot attach to PHY (error: %d)\n",
3821                                    __func__, ret);
3822                         goto init_phy_error;
3823                 }
3824         }
3825
3826         /* Extra statistics */
3827         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3828         priv->xstats.threshold = tc;
3829
3830         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3831
3832         buf_sz = dma_conf->dma_buf_sz;
3833         memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3834
3835         stmmac_reset_queues_param(priv);
3836
3837         if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
3838                 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3839                 if (ret < 0) {
3840                         netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3841                                    __func__);
3842                         goto init_error;
3843                 }
3844         }
3845
3846         ret = stmmac_hw_setup(dev, true);
3847         if (ret < 0) {
3848                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3849                 goto init_error;
3850         }
3851
3852         stmmac_init_coalesce(priv);
3853
3854         phylink_start(priv->phylink);
3855         /* We may have called phylink_speed_down before */
3856         phylink_speed_up(priv->phylink);
3857
3858         ret = stmmac_request_irq(dev);
3859         if (ret)
3860                 goto irq_error;
3861
3862         stmmac_enable_all_queues(priv);
3863         netif_tx_start_all_queues(priv->dev);
3864         stmmac_enable_all_dma_irq(priv);
3865
3866         return 0;
3867
3868 irq_error:
3869         phylink_stop(priv->phylink);
3870
3871         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3872                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3873
3874         stmmac_hw_teardown(dev);
3875 init_error:
3876         free_dma_desc_resources(priv, &priv->dma_conf);
3877         phylink_disconnect_phy(priv->phylink);
3878 init_phy_error:
3879         pm_runtime_put(priv->device);
3880         return ret;
3881 }
3882
3883 static int stmmac_open(struct net_device *dev)
3884 {
3885         struct stmmac_priv *priv = netdev_priv(dev);
3886         struct stmmac_dma_conf *dma_conf;
3887         int ret;
3888
3889         dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3890         if (IS_ERR(dma_conf))
3891                 return PTR_ERR(dma_conf);
3892
3893         ret = __stmmac_open(dev, dma_conf);
3894         kfree(dma_conf);
3895         return ret;
3896 }
3897
3898 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3899 {
3900         set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3901
3902         if (priv->fpe_wq)
3903                 destroy_workqueue(priv->fpe_wq);
3904
3905         netdev_info(priv->dev, "FPE workqueue stop");
3906 }
3907
3908 /**
3909  *  stmmac_release - close entry point of the driver
3910  *  @dev : device pointer.
3911  *  Description:
3912  *  This is the stop entry point of the driver.
3913  */
3914 static int stmmac_release(struct net_device *dev)
3915 {
3916         struct stmmac_priv *priv = netdev_priv(dev);
3917         u32 chan;
3918
3919         if (device_may_wakeup(priv->device))
3920                 phylink_speed_down(priv->phylink, false);
3921         /* Stop and disconnect the PHY */
3922         phylink_stop(priv->phylink);
3923         phylink_disconnect_phy(priv->phylink);
3924
3925         stmmac_disable_all_queues(priv);
3926
3927         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3928                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3929
3930         netif_tx_disable(dev);
3931
3932         /* Free the IRQ lines */
3933         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3934
3935         if (priv->eee_enabled) {
3936                 priv->tx_path_in_lpi_mode = false;
3937                 del_timer_sync(&priv->eee_ctrl_timer);
3938         }
3939
3940         /* Stop TX/RX DMA and clear the descriptors */
3941         stmmac_stop_all_dma(priv);
3942
3943         /* Release and free the Rx/Tx resources */
3944         free_dma_desc_resources(priv, &priv->dma_conf);
3945
3946         /* Disable the MAC Rx/Tx */
3947         stmmac_mac_set(priv, priv->ioaddr, false);
3948
3949         /* Power down the SerDes if present */
3950         if (priv->plat->serdes_powerdown)
3951                 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3952
3953         netif_carrier_off(dev);
3954
3955         stmmac_release_ptp(priv);
3956
3957         pm_runtime_put(priv->device);
3958
3959         if (priv->dma_cap.fpesel)
3960                 stmmac_fpe_stop_wq(priv);
3961
3962         return 0;
3963 }
3964
3965 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3966                                struct stmmac_tx_queue *tx_q)
3967 {
3968         u16 tag = 0x0, inner_tag = 0x0;
3969         u32 inner_type = 0x0;
3970         struct dma_desc *p;
3971
3972         if (!priv->dma_cap.vlins)
3973                 return false;
3974         if (!skb_vlan_tag_present(skb))
3975                 return false;
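             /* For 802.1ad (QinQ) frames the skb tag is programmed as the
              * inner tag.
              */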
3976         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3977                 inner_tag = skb_vlan_tag_get(skb);
3978                 inner_type = STMMAC_VLAN_INSERT;
3979         }
3980
3981         tag = skb_vlan_tag_get(skb);
3982
3983         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3984                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3985         else
3986                 p = &tx_q->dma_tx[tx_q->cur_tx];
3987
3988         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3989                 return false;
3990
3991         stmmac_set_tx_owner(priv, p);
3992         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
3993         return true;
3994 }
3995
3996 /**
3997  *  stmmac_tso_allocator - allocate and fill descriptors for the TSO payload
3998  *  @priv: driver private structure
3999  *  @des: buffer start address
4000  *  @total_len: total length to fill in descriptors
4001  *  @last_segment: condition for the last descriptor
4002  *  @queue: TX queue index
4003  *  Description:
4004  *  This function fills descriptors and requests new descriptors according
4005  *  to the buffer length to fill
4006  */
4007 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4008                                  int total_len, bool last_segment, u32 queue)
4009 {
4010         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4011         struct dma_desc *desc;
4012         u32 buff_size;
4013         int tmp_len;
4014
4015         tmp_len = total_len;
4016
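             /* Chop the payload into chunks of at most TSO_MAX_BUFF_SIZE
              * bytes, one descriptor per chunk.
              */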
4017         while (tmp_len > 0) {
4018                 dma_addr_t curr_addr;
4019
4020                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4021                                                 priv->dma_conf.dma_tx_size);
4022                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4023
4024                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4025                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4026                 else
4027                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4028
4029                 curr_addr = des + (total_len - tmp_len);
4030                 if (priv->dma_cap.addr64 <= 32)
4031                         desc->des0 = cpu_to_le32(curr_addr);
4032                 else
4033                         stmmac_set_desc_addr(priv, desc, curr_addr);
4034
4035                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4036                             TSO_MAX_BUFF_SIZE : tmp_len;
4037
4038                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4039                                 0, 1,
4040                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4041                                 0, 0);
4042
4043                 tmp_len -= TSO_MAX_BUFF_SIZE;
4044         }
4045 }
4046
4047 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4048 {
4049         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4050         int desc_size;
4051
4052         if (likely(priv->extend_desc))
4053                 desc_size = sizeof(struct dma_extended_desc);
4054         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4055                 desc_size = sizeof(struct dma_edesc);
4056         else
4057                 desc_size = sizeof(struct dma_desc);
4058
4059         /* The own bit must be the last setting done when preparing the
4060          * descriptor and then a barrier is needed to make sure that
4061          * all is coherent before granting ownership to the DMA engine.
4062          */
4063         wmb();
4064
4065         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4066         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4067 }
4068
4069 /**
4070  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4071  *  @skb : the socket buffer
4072  *  @dev : device pointer
4073  *  Description: this is the transmit function that is called on TSO frames
4074  *  (support available on GMAC4 and newer chips).
4075  *  The diagram below shows the ring programming in case of TSO frames:
4076  *
4077  *  First Descriptor
4078  *   --------
4079  *   | DES0 |---> buffer1 = L2/L3/L4 header
4080  *   | DES1 |---> TCP Payload (can continue on next descr...)
4081  *   | DES2 |---> buffer 1 and 2 len
4082  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4083  *   --------
4084  *      |
4085  *     ...
4086  *      |
4087  *   --------
4088  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4089  *   | DES1 | --|
4090  *   | DES2 | --> buffer 1 and 2 len
4091  *   | DES3 |
4092  *   --------
4093  *
4094  * mss is fixed when TSO is enabled, so the TDES3 ctx field is not reprogrammed per frame.
4095  */
4096 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4097 {
4098         struct dma_desc *desc, *first, *mss_desc = NULL;
4099         struct stmmac_priv *priv = netdev_priv(dev);
4100         int nfrags = skb_shinfo(skb)->nr_frags;
4101         u32 queue = skb_get_queue_mapping(skb);
4102         unsigned int first_entry, tx_packets;
4103         int tmp_pay_len = 0, first_tx;
4104         struct stmmac_tx_queue *tx_q;
4105         bool has_vlan, set_ic;
4106         u8 proto_hdr_len, hdr;
4107         u32 pay_len, mss;
4108         dma_addr_t des;
4109         int i;
4110
4111         tx_q = &priv->dma_conf.tx_queue[queue];
4112         first_tx = tx_q->cur_tx;
4113
4114         /* Compute header lengths */
4115         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4116                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4117                 hdr = sizeof(struct udphdr);
4118         } else {
4119                 proto_hdr_len = skb_tcp_all_headers(skb);
4120                 hdr = tcp_hdrlen(skb);
4121         }
4122
4123         /* Desc availability based on threshold should be safe enough */
4124         if (unlikely(stmmac_tx_avail(priv, queue) <
4125                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4126                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4127                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4128                                                                 queue));
4129                         /* This is a hard error, log it. */
4130                         netdev_err(priv->dev,
4131                                    "%s: Tx Ring full when queue awake\n",
4132                                    __func__);
4133                 }
4134                 return NETDEV_TX_BUSY;
4135         }
4136
4137         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4138
4139         mss = skb_shinfo(skb)->gso_size;
4140
4141         /* set new MSS value if needed */
4142         if (mss != tx_q->mss) {
4143                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4144                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4145                 else
4146                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4147
4148                 stmmac_set_mss(priv, mss_desc, mss);
4149                 tx_q->mss = mss;
4150                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4151                                                 priv->dma_conf.dma_tx_size);
4152                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4153         }
4154
4155         if (netif_msg_tx_queued(priv)) {
4156                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4157                         __func__, hdr, proto_hdr_len, pay_len, mss);
4158                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4159                         skb->data_len);
4160         }
4161
4162         /* Check if VLAN can be inserted by HW */
4163         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4164
4165         first_entry = tx_q->cur_tx;
4166         WARN_ON(tx_q->tx_skbuff[first_entry]);
4167
4168         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4169                 desc = &tx_q->dma_entx[first_entry].basic;
4170         else
4171                 desc = &tx_q->dma_tx[first_entry];
4172         first = desc;
4173
4174         if (has_vlan)
4175                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4176
4177         /* first descriptor: fill Headers on Buf1 */
4178         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4179                              DMA_TO_DEVICE);
4180         if (dma_mapping_error(priv->device, des))
4181                 goto dma_map_err;
4182
4183         tx_q->tx_skbuff_dma[first_entry].buf = des;
4184         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4185         tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4186         tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4187
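             /* On cores limited to 32-bit DMA addresses, buffer 1 of the
              * first descriptor holds the headers and buffer 2 the start of
              * the payload; wider cores use the generic address helper.
              */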
4188         if (priv->dma_cap.addr64 <= 32) {
4189                 first->des0 = cpu_to_le32(des);
4190
4191                 /* Fill start of payload in buff2 of first descriptor */
4192                 if (pay_len)
4193                         first->des1 = cpu_to_le32(des + proto_hdr_len);
4194
4195                 /* If needed take extra descriptors to fill the remaining payload */
4196                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4197         } else {
4198                 stmmac_set_desc_addr(priv, first, des);
4199                 tmp_pay_len = pay_len;
4200                 des += proto_hdr_len;
4201                 pay_len = 0;
4202         }
4203
4204         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4205
4206         /* Prepare fragments */
4207         for (i = 0; i < nfrags; i++) {
4208                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4209
4210                 des = skb_frag_dma_map(priv->device, frag, 0,
4211                                        skb_frag_size(frag),
4212                                        DMA_TO_DEVICE);
4213                 if (dma_mapping_error(priv->device, des))
4214                         goto dma_map_err;
4215
4216                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4217                                      (i == nfrags - 1), queue);
4218
4219                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4220                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4221                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4222                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4223         }
4224
4225         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4226
4227         /* Only the last descriptor gets to point to the skb. */
4228         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4229         tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4230
4231         /* Manage tx mitigation */
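        /* Request an interrupt on completion (IC) only for a subset of the
         * descriptors: always when a hardware TX timestamp is pending, and
         * otherwise whenever the per-queue frame counter crosses a multiple
         * of tx_coal_frames. For example, with tx_coal_frames = 25, a counter
         * at 23 plus 3 new packets gives 26, and 26 % 25 = 1 < 3, so IC is
         * set. Remaining completions are reaped by the coalesce timer.
         */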
4232         tx_packets = (tx_q->cur_tx + 1) - first_tx;
4233         tx_q->tx_count_frames += tx_packets;
4234
4235         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4236                 set_ic = true;
4237         else if (!priv->tx_coal_frames[queue])
4238                 set_ic = false;
4239         else if (tx_packets > priv->tx_coal_frames[queue])
4240                 set_ic = true;
4241         else if ((tx_q->tx_count_frames %
4242                   priv->tx_coal_frames[queue]) < tx_packets)
4243                 set_ic = true;
4244         else
4245                 set_ic = false;
4246
4247         if (set_ic) {
4248                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4249                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4250                 else
4251                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4252
4253                 tx_q->tx_count_frames = 0;
4254                 stmmac_set_tx_ic(priv, desc);
4255                 priv->xstats.tx_set_ic_bit++;
4256         }
4257
4258         /* We've used all descriptors we need for this skb, however,
4259          * advance cur_tx so that it references a fresh descriptor.
4260          * ndo_start_xmit will fill this descriptor the next time it's
4261          * called and stmmac_tx_clean may clean up to this descriptor.
4262          */
4263         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4264
4265         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4266                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4267                           __func__);
4268                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4269         }
4270
4271         dev->stats.tx_bytes += skb->len;
4272         priv->xstats.tx_tso_frames++;
4273         priv->xstats.tx_tso_nfrags += nfrags;
4274
4275         if (priv->sarc_type)
4276                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4277
4278         skb_tx_timestamp(skb);
4279
4280         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4281                      priv->hwts_tx_en)) {
4282                 /* declare that device is doing timestamping */
4283                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4284                 stmmac_enable_tx_timestamp(priv, first);
4285         }
4286
4287         /* Complete the first descriptor before granting the DMA */
4288         stmmac_prepare_tso_tx_desc(priv, first, 1,
4289                         proto_hdr_len,
4290                         pay_len,
4291                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4292                         hdr / 4, (skb->len - proto_hdr_len));
4293
4294         /* If context desc is used to change MSS */
4295         if (mss_desc) {
4296                 /* Make sure that the first descriptor has been completely
4297                  * written, including its OWN bit. This is because the MSS
4298                  * context descriptor sits before the first descriptor, so
4299                  * its OWN bit must be the last thing written.
4300                  */
4301                 dma_wmb();
4302                 stmmac_set_tx_owner(priv, mss_desc);
4303         }
4304
4305         if (netif_msg_pktdata(priv)) {
4306                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4307                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4308                         tx_q->cur_tx, first, nfrags);
4309                 pr_info(">>> frame to be transmitted: ");
4310                 print_pkt(skb->data, skb_headlen(skb));
4311         }
4312
4313         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4314
4315         stmmac_flush_tx_descriptors(priv, queue);
4316         stmmac_tx_timer_arm(priv, queue);
4317
4318         return NETDEV_TX_OK;
4319
4320 dma_map_err:
4321         dev_err(priv->device, "Tx dma map failed\n");
4322         dev_kfree_skb(skb);
4323         priv->dev->stats.tx_dropped++;
4324         return NETDEV_TX_OK;
4325 }
4326
4327 /**
4328  *  stmmac_xmit - Tx entry point of the driver
4329  *  @skb : the socket buffer
4330  *  @dev : device pointer
4331  *  Description : this is the tx entry point of the driver.
4332  *  It programs the chain or the ring and supports oversized frames
4333  *  and the SG feature.
4334  */
4335 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4336 {
4337         unsigned int first_entry, tx_packets, enh_desc;
4338         struct stmmac_priv *priv = netdev_priv(dev);
4339         unsigned int nopaged_len = skb_headlen(skb);
4340         int i, csum_insertion = 0, is_jumbo = 0;
4341         u32 queue = skb_get_queue_mapping(skb);
4342         int nfrags = skb_shinfo(skb)->nr_frags;
4343         int gso = skb_shinfo(skb)->gso_type;
4344         struct dma_edesc *tbs_desc = NULL;
4345         struct dma_desc *desc, *first;
4346         struct stmmac_tx_queue *tx_q;
4347         bool has_vlan, set_ic;
4348         int entry, first_tx;
4349         dma_addr_t des;
4350
4351         tx_q = &priv->dma_conf.tx_queue[queue];
4352         first_tx = tx_q->cur_tx;
4353
4354         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4355                 stmmac_disable_eee_mode(priv);
4356
4357         /* Manage oversized TCP frames for GMAC4 device */
4358         if (skb_is_gso(skb) && priv->tso) {
4359                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4360                         return stmmac_tso_xmit(skb, dev);
4361                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4362                         return stmmac_tso_xmit(skb, dev);
4363         }
4364
4365         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4366                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4367                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4368                                                                 queue));
4369                         /* This is a hard error, log it. */
4370                         netdev_err(priv->dev,
4371                                    "%s: Tx Ring full when queue awake\n",
4372                                    __func__);
4373                 }
4374                 return NETDEV_TX_BUSY;
4375         }
4376
4377         /* Check if VLAN can be inserted by HW */
4378         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4379
4380         entry = tx_q->cur_tx;
4381         first_entry = entry;
4382         WARN_ON(tx_q->tx_skbuff[first_entry]);
4383
4384         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4385
4386         if (likely(priv->extend_desc))
4387                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4388         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4389                 desc = &tx_q->dma_entx[entry].basic;
4390         else
4391                 desc = tx_q->dma_tx + entry;
4392
4393         first = desc;
4394
4395         if (has_vlan)
4396                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4397
4398         enh_desc = priv->plat->enh_desc;
4399         /* To program the descriptors according to the size of the frame */
4400         if (enh_desc)
4401                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4402
4403         if (unlikely(is_jumbo)) {
4404                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4405                 if (unlikely(entry < 0) && (entry != -EINVAL))
4406                         goto dma_map_err;
4407         }
4408
4409         for (i = 0; i < nfrags; i++) {
4410                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4411                 int len = skb_frag_size(frag);
4412                 bool last_segment = (i == (nfrags - 1));
4413
4414                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4415                 WARN_ON(tx_q->tx_skbuff[entry]);
4416
4417                 if (likely(priv->extend_desc))
4418                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4419                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4420                         desc = &tx_q->dma_entx[entry].basic;
4421                 else
4422                         desc = tx_q->dma_tx + entry;
4423
4424                 des = skb_frag_dma_map(priv->device, frag, 0, len,
4425                                        DMA_TO_DEVICE);
4426                 if (dma_mapping_error(priv->device, des))
4427                         goto dma_map_err; /* should reuse desc w/o issues */
4428
4429                 tx_q->tx_skbuff_dma[entry].buf = des;
4430
4431                 stmmac_set_desc_addr(priv, desc, des);
4432
4433                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4434                 tx_q->tx_skbuff_dma[entry].len = len;
4435                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4436                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4437
4438                 /* Prepare the descriptor and set the own bit too */
4439                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4440                                 priv->mode, 1, last_segment, skb->len);
4441         }
4442
4443         /* Only the last descriptor gets to point to the skb. */
4444         tx_q->tx_skbuff[entry] = skb;
4445         tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4446
4447         /* According to the coalesce parameter, the IC bit for the latest
4448          * segment is reset and the timer restarted to clean the tx status.
4449          * This approach takes care of the fragments: desc is the first
4450          * element in case of no SG.
4451          */
4452         tx_packets = (entry + 1) - first_tx;
4453         tx_q->tx_count_frames += tx_packets;
4454
4455         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4456                 set_ic = true;
4457         else if (!priv->tx_coal_frames[queue])
4458                 set_ic = false;
4459         else if (tx_packets > priv->tx_coal_frames[queue])
4460                 set_ic = true;
4461         else if ((tx_q->tx_count_frames %
4462                   priv->tx_coal_frames[queue]) < tx_packets)
4463                 set_ic = true;
4464         else
4465                 set_ic = false;
4466
4467         if (set_ic) {
4468                 if (likely(priv->extend_desc))
4469                         desc = &tx_q->dma_etx[entry].basic;
4470                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4471                         desc = &tx_q->dma_entx[entry].basic;
4472                 else
4473                         desc = &tx_q->dma_tx[entry];
4474
4475                 tx_q->tx_count_frames = 0;
4476                 stmmac_set_tx_ic(priv, desc);
4477                 priv->xstats.tx_set_ic_bit++;
4478         }
4479
4480         /* We've used all descriptors we need for this skb, however,
4481          * advance cur_tx so that it references a fresh descriptor.
4482          * ndo_start_xmit will fill this descriptor the next time it's
4483          * called and stmmac_tx_clean may clean up to this descriptor.
4484          */
4485         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4486         tx_q->cur_tx = entry;
4487
4488         if (netif_msg_pktdata(priv)) {
4489                 netdev_dbg(priv->dev,
4490                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4491                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4492                            entry, first, nfrags);
4493
4494                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4495                 print_pkt(skb->data, skb->len);
4496         }
4497
4498         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4499                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4500                           __func__);
4501                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4502         }
4503
4504         dev->stats.tx_bytes += skb->len;
4505
4506         if (priv->sarc_type)
4507                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4508
4509         skb_tx_timestamp(skb);
4510
4511         /* Ready to fill the first descriptor and set the OWN bit w/o any
4512          * problems because all the descriptors are actually ready to be
4513          * passed to the DMA engine.
4514          */
4515         if (likely(!is_jumbo)) {
4516                 bool last_segment = (nfrags == 0);
4517
4518                 des = dma_map_single(priv->device, skb->data,
4519                                      nopaged_len, DMA_TO_DEVICE);
4520                 if (dma_mapping_error(priv->device, des))
4521                         goto dma_map_err;
4522
4523                 tx_q->tx_skbuff_dma[first_entry].buf = des;
4524                 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4525                 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4526
4527                 stmmac_set_desc_addr(priv, first, des);
4528
4529                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4530                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4531
4532                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4533                              priv->hwts_tx_en)) {
4534                         /* declare that device is doing timestamping */
4535                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4536                         stmmac_enable_tx_timestamp(priv, first);
4537                 }
4538
4539                 /* Prepare the first descriptor setting the OWN bit too */
4540                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4541                                 csum_insertion, priv->mode, 0, last_segment,
4542                                 skb->len);
4543         }
4544
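        /* With time based scheduling (TBS) enabled, program the launch time
         * taken from skb->tstamp into the enhanced transmit descriptor.
         */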
4545         if (tx_q->tbs & STMMAC_TBS_EN) {
4546                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4547
4548                 tbs_desc = &tx_q->dma_entx[first_entry];
4549                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4550         }
4551
4552         stmmac_set_tx_owner(priv, first);
4553
4554         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4555
4556         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4557
4558         stmmac_flush_tx_descriptors(priv, queue);
4559         stmmac_tx_timer_arm(priv, queue);
4560
4561         return NETDEV_TX_OK;
4562
4563 dma_map_err:
4564         netdev_err(priv->dev, "Tx DMA map failed\n");
4565         dev_kfree_skb(skb);
4566         priv->dev->stats.tx_dropped++;
4567         return NETDEV_TX_OK;
4568 }
4569
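/* Pop the VLAN tag from the received frame in software and record it in the
 * skb when the matching RX VLAN offload feature is enabled on the netdev.
 */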
4570 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4571 {
4572         struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4573         __be16 vlan_proto = veth->h_vlan_proto;
4574         u16 vlanid;
4575
4576         if ((vlan_proto == htons(ETH_P_8021Q) &&
4577              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4578             (vlan_proto == htons(ETH_P_8021AD) &&
4579              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4580                 /* pop the vlan tag */
4581                 vlanid = ntohs(veth->h_vlan_TCI);
4582                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4583                 skb_pull(skb, VLAN_HLEN);
4584                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4585         }
4586 }
4587
4588 /**
4589  * stmmac_rx_refill - refill the used preallocated RX buffers
4590  * @priv: driver private structure
4591  * @queue: RX queue index
4592  * Description : this is to refill the RX buffers for the reception process
4593  * that is based on zero-copy.
4594  */
4595 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4596 {
4597         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4598         int dirty = stmmac_rx_dirty(priv, queue);
4599         unsigned int entry = rx_q->dirty_rx;
4600         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4601
4602         if (priv->dma_cap.host_dma_width <= 32)
4603                 gfp |= GFP_DMA32;
4604
4605         while (dirty-- > 0) {
4606                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4607                 struct dma_desc *p;
4608                 bool use_rx_wd;
4609
4610                 if (priv->extend_desc)
4611                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4612                 else
4613                         p = rx_q->dma_rx + entry;
4614
4615                 if (!buf->page) {
4616                         buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4617                         if (!buf->page)
4618                                 break;
4619                 }
4620
4621                 if (priv->sph && !buf->sec_page) {
4622                         buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4623                         if (!buf->sec_page)
4624                                 break;
4625
4626                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4627                 }
4628
4629                 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4630
4631                 stmmac_set_desc_addr(priv, p, buf->addr);
4632                 if (priv->sph)
4633                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4634                 else
4635                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4636                 stmmac_refill_desc3(priv, rx_q, p);
4637
4638                 rx_q->rx_count_frames++;
4639                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4640                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4641                         rx_q->rx_count_frames = 0;
4642
4643                 use_rx_wd = !priv->rx_coal_frames[queue];
4644                 use_rx_wd |= rx_q->rx_count_frames > 0;
4645                 if (!priv->use_riwt)
4646                         use_rx_wd = false;
4647
4648                 dma_wmb();
4649                 stmmac_set_rx_owner(priv, p, use_rx_wd);
4650
4651                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4652         }
4653         rx_q->dirty_rx = entry;
4654         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4655                             (rx_q->dirty_rx * sizeof(struct dma_desc));
4656         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4657 }
4658
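/* Number of bytes held by buffer 1 of this RX descriptor, accounting for
 * split header (SPH) mode and for first/last descriptor status.
 */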
4659 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4660                                        struct dma_desc *p,
4661                                        int status, unsigned int len)
4662 {
4663         unsigned int plen = 0, hlen = 0;
4664         int coe = priv->hw->rx_csum;
4665
4666         /* Not first descriptor, buffer is always zero */
4667         if (priv->sph && len)
4668                 return 0;
4669
4670         /* First descriptor, get split header length */
4671         stmmac_get_rx_header_len(priv, p, &hlen);
4672         if (priv->sph && hlen) {
4673                 priv->xstats.rx_split_hdr_pkt_n++;
4674                 return hlen;
4675         }
4676
4677         /* First descriptor, not last descriptor and not split header */
4678         if (status & rx_not_ls)
4679                 return priv->dma_conf.dma_buf_sz;
4680
4681         plen = stmmac_get_rx_frame_len(priv, p, coe);
4682
4683         /* First descriptor and last descriptor and not split header */
4684         return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4685 }
4686
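/* Number of bytes held by buffer 2 of this RX descriptor; buffer 2 is only
 * used when split header (SPH) mode is enabled.
 */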
4687 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4688                                        struct dma_desc *p,
4689                                        int status, unsigned int len)
4690 {
4691         int coe = priv->hw->rx_csum;
4692         unsigned int plen = 0;
4693
4694         /* Without split header, buffer 2 is not used */
4695         if (!priv->sph)
4696                 return 0;
4697
4698         /* Not last descriptor */
4699         if (status & rx_not_ls)
4700                 return priv->dma_conf.dma_buf_sz;
4701
4702         plen = stmmac_get_rx_frame_len(priv, p, coe);
4703
4704         /* Last descriptor */
4705         return plen - len;
4706 }
4707
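/* Queue a single XDP frame for transmission. When dma_map is true (the
 * ndo_xdp_xmit path) the frame data is DMA-mapped here; otherwise (XDP_TX)
 * it lives in a page-pool page and only needs a DMA sync for the device.
 */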
4708 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4709                                 struct xdp_frame *xdpf, bool dma_map)
4710 {
4711         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4712         unsigned int entry = tx_q->cur_tx;
4713         struct dma_desc *tx_desc;
4714         dma_addr_t dma_addr;
4715         bool set_ic;
4716
4717         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4718                 return STMMAC_XDP_CONSUMED;
4719
4720         if (likely(priv->extend_desc))
4721                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4722         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4723                 tx_desc = &tx_q->dma_entx[entry].basic;
4724         else
4725                 tx_desc = tx_q->dma_tx + entry;
4726
4727         if (dma_map) {
4728                 dma_addr = dma_map_single(priv->device, xdpf->data,
4729                                           xdpf->len, DMA_TO_DEVICE);
4730                 if (dma_mapping_error(priv->device, dma_addr))
4731                         return STMMAC_XDP_CONSUMED;
4732
4733                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4734         } else {
4735                 struct page *page = virt_to_page(xdpf->data);
4736
4737                 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4738                            xdpf->headroom;
4739                 dma_sync_single_for_device(priv->device, dma_addr,
4740                                            xdpf->len, DMA_BIDIRECTIONAL);
4741
4742                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4743         }
4744
4745         tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4746         tx_q->tx_skbuff_dma[entry].map_as_page = false;
4747         tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4748         tx_q->tx_skbuff_dma[entry].last_segment = true;
4749         tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4750
4751         tx_q->xdpf[entry] = xdpf;
4752
4753         stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4754
4755         stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4756                                true, priv->mode, true, true,
4757                                xdpf->len);
4758
4759         tx_q->tx_count_frames++;
4760
4761         if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4762                 set_ic = true;
4763         else
4764                 set_ic = false;
4765
4766         if (set_ic) {
4767                 tx_q->tx_count_frames = 0;
4768                 stmmac_set_tx_ic(priv, tx_desc);
4769                 priv->xstats.tx_set_ic_bit++;
4770         }
4771
4772         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4773
4774         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4775         tx_q->cur_tx = entry;
4776
4777         return STMMAC_XDP_TX;
4778 }
4779
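/* Pick the TX queue used for XDP transmission from the current CPU id,
 * wrapping around when there are more CPUs than TX queues in use.
 */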
4780 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4781                                    int cpu)
4782 {
4783         int index = cpu;
4784
4785         if (unlikely(index < 0))
4786                 index = 0;
4787
4788         while (index >= priv->plat->tx_queues_to_use)
4789                 index -= priv->plat->tx_queues_to_use;
4790
4791         return index;
4792 }
4793
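/* Handle an XDP_TX verdict: convert the xdp_buff to an xdp_frame and send it
 * on the TX queue mapped to the current CPU, under the netdev TX lock that
 * is shared with the slow path.
 */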
4794 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4795                                 struct xdp_buff *xdp)
4796 {
4797         struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4798         int cpu = smp_processor_id();
4799         struct netdev_queue *nq;
4800         int queue;
4801         int res;
4802
4803         if (unlikely(!xdpf))
4804                 return STMMAC_XDP_CONSUMED;
4805
4806         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4807         nq = netdev_get_tx_queue(priv->dev, queue);
4808
4809         __netif_tx_lock(nq, cpu);
4810         /* Avoids TX time-out as we are sharing with slow path */
4811         txq_trans_cond_update(nq);
4812
4813         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4814         if (res == STMMAC_XDP_TX)
4815                 stmmac_flush_tx_descriptors(priv, queue);
4816
4817         __netif_tx_unlock(nq);
4818
4819         return res;
4820 }
4821
4822 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4823                                  struct bpf_prog *prog,
4824                                  struct xdp_buff *xdp)
4825 {
4826         u32 act;
4827         int res;
4828
4829         act = bpf_prog_run_xdp(prog, xdp);
4830         switch (act) {
4831         case XDP_PASS:
4832                 res = STMMAC_XDP_PASS;
4833                 break;
4834         case XDP_TX:
4835                 res = stmmac_xdp_xmit_back(priv, xdp);
4836                 break;
4837         case XDP_REDIRECT:
4838                 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4839                         res = STMMAC_XDP_CONSUMED;
4840                 else
4841                         res = STMMAC_XDP_REDIRECT;
4842                 break;
4843         default:
4844                 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4845                 fallthrough;
4846         case XDP_ABORTED:
4847                 trace_xdp_exception(priv->dev, prog, act);
4848                 fallthrough;
4849         case XDP_DROP:
4850                 res = STMMAC_XDP_CONSUMED;
4851                 break;
4852         }
4853
4854         return res;
4855 }
4856
4857 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4858                                            struct xdp_buff *xdp)
4859 {
4860         struct bpf_prog *prog;
4861         int res;
4862
4863         prog = READ_ONCE(priv->xdp_prog);
4864         if (!prog) {
4865                 res = STMMAC_XDP_PASS;
4866                 goto out;
4867         }
4868
4869         res = __stmmac_xdp_run_prog(priv, prog, xdp);
4870 out:
4871         return ERR_PTR(-res);
4872 }
4873
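/* Called once per RX pass: arm the TX timer if frames were queued via XDP_TX
 * and flush any pending redirects if XDP_REDIRECT was used.
 */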
4874 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4875                                    int xdp_status)
4876 {
4877         int cpu = smp_processor_id();
4878         int queue;
4879
4880         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4881
4882         if (xdp_status & STMMAC_XDP_TX)
4883                 stmmac_tx_timer_arm(priv, queue);
4884
4885         if (xdp_status & STMMAC_XDP_REDIRECT)
4886                 xdp_do_flush();
4887 }
4888
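/* Copy an XSK (zero-copy) RX buffer into a freshly allocated skb, preserving
 * any XDP metadata placed in front of the packet data.
 */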
4889 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4890                                                struct xdp_buff *xdp)
4891 {
4892         unsigned int metasize = xdp->data - xdp->data_meta;
4893         unsigned int datasize = xdp->data_end - xdp->data;
4894         struct sk_buff *skb;
4895
4896         skb = __napi_alloc_skb(&ch->rxtx_napi,
4897                                xdp->data_end - xdp->data_hard_start,
4898                                GFP_ATOMIC | __GFP_NOWARN);
4899         if (unlikely(!skb))
4900                 return NULL;
4901
4902         skb_reserve(skb, xdp->data - xdp->data_hard_start);
4903         memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4904         if (metasize)
4905                 skb_metadata_set(skb, metasize);
4906
4907         return skb;
4908 }
4909
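/* Deliver a zero-copy RX frame that got an XDP_PASS verdict to the stack:
 * build an skb, fill in timestamp, VLAN, checksum and hash info, then hand
 * it to GRO.
 */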
4910 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4911                                    struct dma_desc *p, struct dma_desc *np,
4912                                    struct xdp_buff *xdp)
4913 {
4914         struct stmmac_channel *ch = &priv->channel[queue];
4915         unsigned int len = xdp->data_end - xdp->data;
4916         enum pkt_hash_types hash_type;
4917         int coe = priv->hw->rx_csum;
4918         struct sk_buff *skb;
4919         u32 hash;
4920
4921         skb = stmmac_construct_skb_zc(ch, xdp);
4922         if (!skb) {
4923                 priv->dev->stats.rx_dropped++;
4924                 return;
4925         }
4926
4927         stmmac_get_rx_hwtstamp(priv, p, np, skb);
4928         stmmac_rx_vlan(priv->dev, skb);
4929         skb->protocol = eth_type_trans(skb, priv->dev);
4930
4931         if (unlikely(!coe))
4932                 skb_checksum_none_assert(skb);
4933         else
4934                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4935
4936         if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4937                 skb_set_hash(skb, hash, hash_type);
4938
4939         skb_record_rx_queue(skb, queue);
4940         napi_gro_receive(&ch->rxtx_napi, skb);
4941
4942         priv->dev->stats.rx_packets++;
4943         priv->dev->stats.rx_bytes += len;
4944 }
4945
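/* Refill up to 'budget' RX descriptors with buffers taken from the XSK pool.
 * Returns false if the pool ran out of buffers before the budget was used.
 */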
4946 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4947 {
4948         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4949         unsigned int entry = rx_q->dirty_rx;
4950         struct dma_desc *rx_desc = NULL;
4951         bool ret = true;
4952
4953         budget = min(budget, stmmac_rx_dirty(priv, queue));
4954
4955         while (budget-- > 0 && entry != rx_q->cur_rx) {
4956                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4957                 dma_addr_t dma_addr;
4958                 bool use_rx_wd;
4959
4960                 if (!buf->xdp) {
4961                         buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4962                         if (!buf->xdp) {
4963                                 ret = false;
4964                                 break;
4965                         }
4966                 }
4967
4968                 if (priv->extend_desc)
4969                         rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4970                 else
4971                         rx_desc = rx_q->dma_rx + entry;
4972
4973                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4974                 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4975                 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4976                 stmmac_refill_desc3(priv, rx_q, rx_desc);
4977
4978                 rx_q->rx_count_frames++;
4979                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4980                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4981                         rx_q->rx_count_frames = 0;
4982
4983                 use_rx_wd = !priv->rx_coal_frames[queue];
4984                 use_rx_wd |= rx_q->rx_count_frames > 0;
4985                 if (!priv->use_riwt)
4986                         use_rx_wd = false;
4987
4988                 dma_wmb();
4989                 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4990
4991                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4992         }
4993
4994         if (rx_desc) {
4995                 rx_q->dirty_rx = entry;
4996                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4997                                      (rx_q->dirty_rx * sizeof(struct dma_desc));
4998                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4999         }
5000
5001         return ret;
5002 }
5003
5004 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5005 {
5006         /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5007          * to represent incoming packet, whereas cb field in the same structure
5008          * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5009          * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5010          */
5011         return (struct stmmac_xdp_buff *)xdp;
5012 }
5013
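/* Zero-copy (AF_XDP) RX path: frames land directly in XSK pool buffers, are
 * run through the XDP program and are then either copied to the stack,
 * transmitted, redirected or dropped.
 */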
5014 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5015 {
5016         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5017         unsigned int count = 0, error = 0, len = 0;
5018         int dirty = stmmac_rx_dirty(priv, queue);
5019         unsigned int next_entry = rx_q->cur_rx;
5020         unsigned int desc_size;
5021         struct bpf_prog *prog;
5022         bool failure = false;
5023         int xdp_status = 0;
5024         int status = 0;
5025
5026         if (netif_msg_rx_status(priv)) {
5027                 void *rx_head;
5028
5029                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5030                 if (priv->extend_desc) {
5031                         rx_head = (void *)rx_q->dma_erx;
5032                         desc_size = sizeof(struct dma_extended_desc);
5033                 } else {
5034                         rx_head = (void *)rx_q->dma_rx;
5035                         desc_size = sizeof(struct dma_desc);
5036                 }
5037
5038                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5039                                     rx_q->dma_rx_phy, desc_size);
5040         }
5041         while (count < limit) {
5042                 struct stmmac_rx_buffer *buf;
5043                 struct stmmac_xdp_buff *ctx;
5044                 unsigned int buf1_len = 0;
5045                 struct dma_desc *np, *p;
5046                 int entry;
5047                 int res;
5048
5049                 if (!count && rx_q->state_saved) {
5050                         error = rx_q->state.error;
5051                         len = rx_q->state.len;
5052                 } else {
5053                         rx_q->state_saved = false;
5054                         error = 0;
5055                         len = 0;
5056                 }
5057
5058                 if (count >= limit)
5059                         break;
5060
5061 read_again:
5062                 buf1_len = 0;
5063                 entry = next_entry;
5064                 buf = &rx_q->buf_pool[entry];
5065
5066                 if (dirty >= STMMAC_RX_FILL_BATCH) {
5067                         failure = failure ||
5068                                   !stmmac_rx_refill_zc(priv, queue, dirty);
5069                         dirty = 0;
5070                 }
5071
5072                 if (priv->extend_desc)
5073                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5074                 else
5075                         p = rx_q->dma_rx + entry;
5076
5077                 /* read the status of the incoming frame */
5078                 status = stmmac_rx_status(priv, &priv->dev->stats,
5079                                           &priv->xstats, p);
5080                 /* check if the descriptor is still owned by the DMA, otherwise go ahead */
5081                 if (unlikely(status & dma_own))
5082                         break;
5083
5084                 /* Prefetch the next RX descriptor */
5085                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5086                                                 priv->dma_conf.dma_rx_size);
5087                 next_entry = rx_q->cur_rx;
5088
5089                 if (priv->extend_desc)
5090                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5091                 else
5092                         np = rx_q->dma_rx + next_entry;
5093
5094                 prefetch(np);
5095
5096                 /* Ensure a valid XSK buffer before proceeding */
5097                 if (!buf->xdp)
5098                         break;
5099
5100                 if (priv->extend_desc)
5101                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5102                                                   &priv->xstats,
5103                                                   rx_q->dma_erx + entry);
5104                 if (unlikely(status == discard_frame)) {
5105                         xsk_buff_free(buf->xdp);
5106                         buf->xdp = NULL;
5107                         dirty++;
5108                         error = 1;
5109                         if (!priv->hwts_rx_en)
5110                                 priv->dev->stats.rx_errors++;
5111                 }
5112
5113                 if (unlikely(error && (status & rx_not_ls)))
5114                         goto read_again;
5115                 if (unlikely(error)) {
5116                         count++;
5117                         continue;
5118                 }
5119
5120                 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5121                 if (likely(status & rx_not_ls)) {
5122                         xsk_buff_free(buf->xdp);
5123                         buf->xdp = NULL;
5124                         dirty++;
5125                         count++;
5126                         goto read_again;
5127                 }
5128
5129                 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5130                 ctx->priv = priv;
5131                 ctx->desc = p;
5132                 ctx->ndesc = np;
5133
5134                 /* XDP ZC frames only support primary buffers for now */
5135                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5136                 len += buf1_len;
5137
5138                 /* ACS is disabled; strip manually. */
5139                 if (likely(!(status & rx_not_ls))) {
5140                         buf1_len -= ETH_FCS_LEN;
5141                         len -= ETH_FCS_LEN;
5142                 }
5143
5144                 /* RX buffer is good and fits into an XSK pool buffer */
5145                 buf->xdp->data_end = buf->xdp->data + buf1_len;
5146                 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5147
5148                 prog = READ_ONCE(priv->xdp_prog);
5149                 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5150
5151                 switch (res) {
5152                 case STMMAC_XDP_PASS:
5153                         stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5154                         xsk_buff_free(buf->xdp);
5155                         break;
5156                 case STMMAC_XDP_CONSUMED:
5157                         xsk_buff_free(buf->xdp);
5158                         priv->dev->stats.rx_dropped++;
5159                         break;
5160                 case STMMAC_XDP_TX:
5161                 case STMMAC_XDP_REDIRECT:
5162                         xdp_status |= res;
5163                         break;
5164                 }
5165
5166                 buf->xdp = NULL;
5167                 dirty++;
5168                 count++;
5169         }
5170
5171         if (status & rx_not_ls) {
5172                 rx_q->state_saved = true;
5173                 rx_q->state.error = error;
5174                 rx_q->state.len = len;
5175         }
5176
5177         stmmac_finalize_xdp_rx(priv, xdp_status);
5178
5179         priv->xstats.rx_pkt_n += count;
5180         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5181
5182         if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5183                 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5184                         xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5185                 else
5186                         xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5187
5188                 return (int)count;
5189         }
5190
5191         return failure ? limit : (int)count;
5192 }
5193
5194 /**
5195  * stmmac_rx - manage the receive process
5196  * @priv: driver private structure
5197  * @limit: napi budget
5198  * @queue: RX queue index.
5199  * Description : this is the function called by the napi poll method.
5200  * It gets all the frames inside the ring.
5201  */
5202 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5203 {
5204         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5205         struct stmmac_channel *ch = &priv->channel[queue];
5206         unsigned int count = 0, error = 0, len = 0;
5207         int status = 0, coe = priv->hw->rx_csum;
5208         unsigned int next_entry = rx_q->cur_rx;
5209         enum dma_data_direction dma_dir;
5210         unsigned int desc_size;
5211         struct sk_buff *skb = NULL;
5212         struct stmmac_xdp_buff ctx;
5213         int xdp_status = 0;
5214         int buf_sz;
5215
5216         dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5217         buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5218
5219         if (netif_msg_rx_status(priv)) {
5220                 void *rx_head;
5221
5222                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5223                 if (priv->extend_desc) {
5224                         rx_head = (void *)rx_q->dma_erx;
5225                         desc_size = sizeof(struct dma_extended_desc);
5226                 } else {
5227                         rx_head = (void *)rx_q->dma_rx;
5228                         desc_size = sizeof(struct dma_desc);
5229                 }
5230
5231                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5232                                     rx_q->dma_rx_phy, desc_size);
5233         }
5234         while (count < limit) {
5235                 unsigned int buf1_len = 0, buf2_len = 0;
5236                 enum pkt_hash_types hash_type;
5237                 struct stmmac_rx_buffer *buf;
5238                 struct dma_desc *np, *p;
5239                 int entry;
5240                 u32 hash;
5241
5242                 if (!count && rx_q->state_saved) {
5243                         skb = rx_q->state.skb;
5244                         error = rx_q->state.error;
5245                         len = rx_q->state.len;
5246                 } else {
5247                         rx_q->state_saved = false;
5248                         skb = NULL;
5249                         error = 0;
5250                         len = 0;
5251                 }
5252
5253                 if (count >= limit)
5254                         break;
5255
5256 read_again:
5257                 buf1_len = 0;
5258                 buf2_len = 0;
5259                 entry = next_entry;
5260                 buf = &rx_q->buf_pool[entry];
5261
5262                 if (priv->extend_desc)
5263                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5264                 else
5265                         p = rx_q->dma_rx + entry;
5266
5267                 /* read the status of the incoming frame */
5268                 status = stmmac_rx_status(priv, &priv->dev->stats,
5269                                 &priv->xstats, p);
5270                 /* check if the descriptor is still owned by the DMA, otherwise go ahead */
5271                 if (unlikely(status & dma_own))
5272                         break;
5273
5274                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5275                                                 priv->dma_conf.dma_rx_size);
5276                 next_entry = rx_q->cur_rx;
5277
5278                 if (priv->extend_desc)
5279                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5280                 else
5281                         np = rx_q->dma_rx + next_entry;
5282
5283                 prefetch(np);
5284
5285                 if (priv->extend_desc)
5286                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5287                                         &priv->xstats, rx_q->dma_erx + entry);
5288                 if (unlikely(status == discard_frame)) {
5289                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5290                         buf->page = NULL;
5291                         error = 1;
5292                         if (!priv->hwts_rx_en)
5293                                 priv->dev->stats.rx_errors++;
5294                 }
5295
5296                 if (unlikely(error && (status & rx_not_ls)))
5297                         goto read_again;
5298                 if (unlikely(error)) {
5299                         dev_kfree_skb(skb);
5300                         skb = NULL;
5301                         count++;
5302                         continue;
5303                 }
5304
5305                 /* Buffer is good. Go on. */
5306
5307                 prefetch(page_address(buf->page) + buf->page_offset);
5308                 if (buf->sec_page)
5309                         prefetch(page_address(buf->sec_page));
5310
5311                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5312                 len += buf1_len;
5313                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5314                 len += buf2_len;
5315
5316                 /* ACS is disabled; strip manually. */
5317                 if (likely(!(status & rx_not_ls))) {
5318                         if (buf2_len) {
5319                                 buf2_len -= ETH_FCS_LEN;
5320                                 len -= ETH_FCS_LEN;
5321                         } else if (buf1_len) {
5322                                 buf1_len -= ETH_FCS_LEN;
5323                                 len -= ETH_FCS_LEN;
5324                         }
5325                 }
5326
5327                 if (!skb) {
5328                         unsigned int pre_len, sync_len;
5329
5330                         dma_sync_single_for_cpu(priv->device, buf->addr,
5331                                                 buf1_len, dma_dir);
5332
5333                         xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5334                         xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5335                                          buf->page_offset, buf1_len, true);
5336
5337                         pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5338                                   buf->page_offset;
5339
5340                         ctx.priv = priv;
5341                         ctx.desc = p;
5342                         ctx.ndesc = np;
5343
5344                         skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5345                         /* Due to xdp_adjust_tail: the DMA sync for_device
5346                          * must cover the max length the CPU touched
5347                          */
5348                         sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5349                                    buf->page_offset;
5350                         sync_len = max(sync_len, pre_len);
5351
5352                         /* For any verdict other than XDP_PASS */
5353                         if (IS_ERR(skb)) {
5354                                 unsigned int xdp_res = -PTR_ERR(skb);
5355
5356                                 if (xdp_res & STMMAC_XDP_CONSUMED) {
5357                                         page_pool_put_page(rx_q->page_pool,
5358                                                            virt_to_head_page(ctx.xdp.data),
5359                                                            sync_len, true);
5360                                         buf->page = NULL;
5361                                         priv->dev->stats.rx_dropped++;
5362
5363                                         /* Clear skb, as it was set to the
5364                                          * error-encoded XDP verdict.
5365                                          */
5366                                         skb = NULL;
5367
5368                                         if (unlikely((status & rx_not_ls)))
5369                                                 goto read_again;
5370
5371                                         count++;
5372                                         continue;
5373                                 } else if (xdp_res & (STMMAC_XDP_TX |
5374                                                       STMMAC_XDP_REDIRECT)) {
5375                                         xdp_status |= xdp_res;
5376                                         buf->page = NULL;
5377                                         skb = NULL;
5378                                         count++;
5379                                         continue;
5380                                 }
5381                         }
5382                 }
5383
5384                 if (!skb) {
5385                         /* XDP program may expand or reduce tail */
5386                         buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5387
5388                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5389                         if (!skb) {
5390                                 priv->dev->stats.rx_dropped++;
5391                                 count++;
5392                                 goto drain_data;
5393                         }
5394
5395                         /* XDP program may adjust header */
5396                         skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5397                         skb_put(skb, buf1_len);
5398
5399                         /* Data payload copied into SKB, page ready for recycle */
5400                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5401                         buf->page = NULL;
5402                 } else if (buf1_len) {
5403                         dma_sync_single_for_cpu(priv->device, buf->addr,
5404                                                 buf1_len, dma_dir);
5405                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5406                                         buf->page, buf->page_offset, buf1_len,
5407                                         priv->dma_conf.dma_buf_sz);
5408
5409                         /* Data payload appended into SKB */
5410                         page_pool_release_page(rx_q->page_pool, buf->page);
5411                         buf->page = NULL;
5412                 }
5413
5414                 if (buf2_len) {
5415                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5416                                                 buf2_len, dma_dir);
5417                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5418                                         buf->sec_page, 0, buf2_len,
5419                                         priv->dma_conf.dma_buf_sz);
5420
5421                         /* Data payload appended into SKB */
5422                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
5423                         buf->sec_page = NULL;
5424                 }
5425
5426 drain_data:
5427                 if (likely(status & rx_not_ls))
5428                         goto read_again;
5429                 if (!skb)
5430                         continue;
5431
5432                 /* Got entire packet into SKB. Finish it. */
5433
5434                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5435                 stmmac_rx_vlan(priv->dev, skb);
5436                 skb->protocol = eth_type_trans(skb, priv->dev);
5437
5438                 if (unlikely(!coe))
5439                         skb_checksum_none_assert(skb);
5440                 else
5441                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5442
5443                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5444                         skb_set_hash(skb, hash, hash_type);
5445
5446                 skb_record_rx_queue(skb, queue);
5447                 napi_gro_receive(&ch->rx_napi, skb);
5448                 skb = NULL;
5449
5450                 priv->dev->stats.rx_packets++;
5451                 priv->dev->stats.rx_bytes += len;
5452                 count++;
5453         }
5454
5455         if (status & rx_not_ls || skb) {
5456                 rx_q->state_saved = true;
5457                 rx_q->state.skb = skb;
5458                 rx_q->state.error = error;
5459                 rx_q->state.len = len;
5460         }
5461
5462         stmmac_finalize_xdp_rx(priv, xdp_status);
5463
5464         stmmac_rx_refill(priv, queue);
5465
5466         priv->xstats.rx_pkt_n += count;
5467         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5468
5469         return count;
5470 }
5471
5472 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5473 {
5474         struct stmmac_channel *ch =
5475                 container_of(napi, struct stmmac_channel, rx_napi);
5476         struct stmmac_priv *priv = ch->priv_data;
5477         u32 chan = ch->index;
5478         int work_done;
5479
5480         priv->xstats.napi_poll++;
5481
5482         work_done = stmmac_rx(priv, budget, chan);
5483         if (work_done < budget && napi_complete_done(napi, work_done)) {
5484                 unsigned long flags;
5485
5486                 spin_lock_irqsave(&ch->lock, flags);
5487                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5488                 spin_unlock_irqrestore(&ch->lock, flags);
5489         }
5490
5491         return work_done;
5492 }
5493
5494 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5495 {
5496         struct stmmac_channel *ch =
5497                 container_of(napi, struct stmmac_channel, tx_napi);
5498         struct stmmac_priv *priv = ch->priv_data;
5499         u32 chan = ch->index;
5500         int work_done;
5501
5502         priv->xstats.napi_poll++;
5503
5504         work_done = stmmac_tx_clean(priv, budget, chan);
5505         work_done = min(work_done, budget);
5506
5507         if (work_done < budget && napi_complete_done(napi, work_done)) {
5508                 unsigned long flags;
5509
5510                 spin_lock_irqsave(&ch->lock, flags);
5511                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5512                 spin_unlock_irqrestore(&ch->lock, flags);
5513         }
5514
5515         return work_done;
5516 }
5517
5518 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5519 {
5520         struct stmmac_channel *ch =
5521                 container_of(napi, struct stmmac_channel, rxtx_napi);
5522         struct stmmac_priv *priv = ch->priv_data;
5523         int rx_done, tx_done, rxtx_done;
5524         u32 chan = ch->index;
5525
5526         priv->xstats.napi_poll++;
5527
5528         tx_done = stmmac_tx_clean(priv, budget, chan);
5529         tx_done = min(tx_done, budget);
5530
5531         rx_done = stmmac_rx_zc(priv, budget, chan);
5532
5533         rxtx_done = max(tx_done, rx_done);
5534
5535         /* If either TX or RX work is not complete, return budget
5536          * and keep polling
5537          */
5538         if (rxtx_done >= budget)
5539                 return budget;
5540
5541         /* all work done, exit the polling mode */
5542         if (napi_complete_done(napi, rxtx_done)) {
5543                 unsigned long flags;
5544
5545                 spin_lock_irqsave(&ch->lock, flags);
5546                 /* Both RX and TX work are complete,
5547                  * so enable both RX & TX IRQs.
5548                  */
5549                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5550                 spin_unlock_irqrestore(&ch->lock, flags);
5551         }
5552
5553         return min(rxtx_done, budget - 1);
5554 }
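/*
 * NAPI contract illustrated (informational sketch only, not driver code):
 *
 *	done = do_work(budget);
 *	if (done >= budget)
 *		return budget;            // stay scheduled, IRQs stay off
 *	if (napi_complete_done(napi, done))
 *		enable_device_irqs();     // re-arm only after completion
 *	return done;                      // strictly less than budget
 *
 * The min(rxtx_done, budget - 1) above makes the "strictly less than
 * budget" rule explicit on the completion path.
 */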
5555
5556 /**
5557  *  stmmac_tx_timeout
5558  *  @dev : Pointer to net device structure
5559  *  @txqueue: the index of the hanging transmit queue
5560  *  Description: this function is called when a packet transmission fails to
5561  *   complete within a reasonable time. The driver will mark the error in the
5562  *   netdev structure and arrange for the device to be reset to a sane state
5563  *   in order to transmit a new packet.
5564  */
5565 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5566 {
5567         struct stmmac_priv *priv = netdev_priv(dev);
5568
5569         stmmac_global_err(priv);
5570 }
5571
5572 /**
5573  *  stmmac_set_rx_mode - entry point for multicast addressing
5574  *  @dev : pointer to the device structure
5575  *  Description:
5576  *  This function is a driver entry point which gets called by the kernel
5577  *  whenever multicast addresses must be enabled/disabled.
5578  *  Return value:
5579  *  void.
5580  */
5581 static void stmmac_set_rx_mode(struct net_device *dev)
5582 {
5583         struct stmmac_priv *priv = netdev_priv(dev);
5584
5585         stmmac_set_filter(priv, priv->hw, dev);
5586 }
5587
5588 /**
5589  *  stmmac_change_mtu - entry point to change MTU size for the device.
5590  *  @dev : device pointer.
5591  *  @new_mtu : the new MTU size for the device.
5592  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5593  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5594  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5595  *  Return value:
5596  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5597  *  file on failure.
5598  */
5599 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5600 {
5601         struct stmmac_priv *priv = netdev_priv(dev);
5602         int txfifosz = priv->plat->tx_fifo_size;
5603         struct stmmac_dma_conf *dma_conf;
5604         const int mtu = new_mtu;
5605         int ret;
5606
5607         if (txfifosz == 0)
5608                 txfifosz = priv->dma_cap.tx_fifo_size;
5609
5610         txfifosz /= priv->plat->tx_queues_to_use;
5611
5612         if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5613                 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5614                 return -EINVAL;
5615         }
5616
5617         new_mtu = STMMAC_ALIGN(new_mtu);
5618
5619         /* Reject if the per-queue TX FIFO is too small or the MTU is too large */
5620         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5621                 return -EINVAL;
5622
5623         if (netif_running(dev)) {
5624                 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5625                 /* Try to allocate the new DMA conf with the new mtu */
5626                 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5627                 if (IS_ERR(dma_conf)) {
5628                         netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5629                                    mtu);
5630                         return PTR_ERR(dma_conf);
5631                 }
5632
5633                 stmmac_release(dev);
5634
5635                 ret = __stmmac_open(dev, dma_conf);
5636                 kfree(dma_conf);
5637                 if (ret) {
5638                         netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5639                         return ret;
5640                 }
5641
5642                 stmmac_set_rx_mode(dev);
5643         }
5644
5645         dev->mtu = mtu;
5646         netdev_update_features(dev);
5647
5648         return 0;
5649 }
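/*
 * Worked example of the FIFO check above (illustrative numbers only):
 * with plat->tx_fifo_size = 16384 bytes and tx_queues_to_use = 4, each
 * queue gets txfifosz = 4096 bytes. A requested MTU of 9000 is aligned
 * up by STMMAC_ALIGN() and exceeds 4096, so the call fails with -EINVAL,
 * while a standard 1500-byte MTU fits and is accepted.
 */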
5650
5651 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5652                                              netdev_features_t features)
5653 {
5654         struct stmmac_priv *priv = netdev_priv(dev);
5655
5656         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5657                 features &= ~NETIF_F_RXCSUM;
5658
5659         if (!priv->plat->tx_coe)
5660                 features &= ~NETIF_F_CSUM_MASK;
5661
5662         /* Some GMAC devices have bugged Jumbo frame support that
5663          * requires the Tx COE to be disabled for oversized frames
5664          * (due to limited buffer sizes). In this case we disable
5665          * TX csum insertion in the TDES and do not use SF.
5666          */
5667         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5668                 features &= ~NETIF_F_CSUM_MASK;
5669
5670         /* Disable tso if asked by ethtool */
5671         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5672                 if (features & NETIF_F_TSO)
5673                         priv->tso = true;
5674                 else
5675                         priv->tso = false;
5676         }
5677
5678         return features;
5679 }
5680
5681 static int stmmac_set_features(struct net_device *netdev,
5682                                netdev_features_t features)
5683 {
5684         struct stmmac_priv *priv = netdev_priv(netdev);
5685
5686         /* Keep the COE Type if RX checksum offload is supported */
5687         if (features & NETIF_F_RXCSUM)
5688                 priv->hw->rx_csum = priv->plat->rx_coe;
5689         else
5690                 priv->hw->rx_csum = 0;
5691         /* No check needed because rx_coe has been set before and will be
5692          * fixed up if there is an issue.
5693          */
5694         stmmac_rx_ipc(priv, priv->hw);
5695
5696         if (priv->sph_cap) {
5697                 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5698                 u32 chan;
5699
5700                 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5701                         stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5702         }
5703
5704         return 0;
5705 }
5706
5707 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5708 {
5709         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5710         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5711         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5712         bool *hs_enable = &fpe_cfg->hs_enable;
5713
5714         if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5715                 return;
5716
5717         /* If LP has sent verify mPacket, LP is FPE capable */
5718         if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5719                 if (*lp_state < FPE_STATE_CAPABLE)
5720                         *lp_state = FPE_STATE_CAPABLE;
5721
5722                 /* If the user has requested FPE enable, respond quickly */
5723                 if (*hs_enable)
5724                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5725                                                 MPACKET_RESPONSE);
5726         }
5727
5728         /* If Local has sent verify mPacket, Local is FPE capable */
5729         if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5730                 if (*lo_state < FPE_STATE_CAPABLE)
5731                         *lo_state = FPE_STATE_CAPABLE;
5732         }
5733
5734         /* If LP has sent response mPacket, LP is entering FPE ON */
5735         if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5736                 *lp_state = FPE_STATE_ENTERING_ON;
5737
5738         /* If Local has sent response mPacket, Local is entering FPE ON */
5739         if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5740                 *lo_state = FPE_STATE_ENTERING_ON;
5741
5742         if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5743             !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5744             priv->fpe_wq) {
5745                 queue_work(priv->fpe_wq, &priv->fpe_task);
5746         }
5747 }
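/*
 * FPE handshake states as driven by the mPacket events above (sketch of
 * the intended progression; the final step is completed by the
 * stmmac_fpe_lp_task() worker queued at the end of this function):
 *
 *	FPE_STATE_OFF         -> FPE_STATE_CAPABLE      // verify mPacket seen
 *	FPE_STATE_CAPABLE     -> FPE_STATE_ENTERING_ON  // response mPacket seen
 *	FPE_STATE_ENTERING_ON -> FPE_STATE_ON           // both sides entering ON
 */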
5748
5749 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5750 {
5751         u32 rx_cnt = priv->plat->rx_queues_to_use;
5752         u32 tx_cnt = priv->plat->tx_queues_to_use;
5753         u32 queues_count;
5754         u32 queue;
5755         bool xmac;
5756
5757         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5758         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5759
5760         if (priv->irq_wake)
5761                 pm_wakeup_event(priv->device, 0);
5762
5763         if (priv->dma_cap.estsel)
5764                 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5765                                       &priv->xstats, tx_cnt);
5766
5767         if (priv->dma_cap.fpesel) {
5768                 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5769                                                    priv->dev);
5770
5771                 stmmac_fpe_event_status(priv, status);
5772         }
5773
5774         /* To handle GMAC own interrupts */
5775         if ((priv->plat->has_gmac) || xmac) {
5776                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5777
5778                 if (unlikely(status)) {
5779                         /* For LPI we need to save the tx status */
5780                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5781                                 priv->tx_path_in_lpi_mode = true;
5782                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5783                                 priv->tx_path_in_lpi_mode = false;
5784                 }
5785
5786                 for (queue = 0; queue < queues_count; queue++) {
5787                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
5788                                                             queue);
5789                 }
5790
5791                 /* PCS link status */
5792                 if (priv->hw->pcs) {
5793                         if (priv->xstats.pcs_link)
5794                                 netif_carrier_on(priv->dev);
5795                         else
5796                                 netif_carrier_off(priv->dev);
5797                 }
5798
5799                 stmmac_timestamp_interrupt(priv, priv);
5800         }
5801 }
5802
5803 /**
5804  *  stmmac_interrupt - main ISR
5805  *  @irq: interrupt number.
5806  *  @dev_id: to pass the net device pointer.
5807  *  Description: this is the main driver interrupt service routine.
5808  *  It can call:
5809  *  o DMA service routine (to manage incoming frame reception and transmission
5810  *    status)
5811  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5812  *    interrupts.
5813  */
5814 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5815 {
5816         struct net_device *dev = (struct net_device *)dev_id;
5817         struct stmmac_priv *priv = netdev_priv(dev);
5818
5819         /* Check if adapter is up */
5820         if (test_bit(STMMAC_DOWN, &priv->state))
5821                 return IRQ_HANDLED;
5822
5823         /* Check if a fatal error happened */
5824         if (stmmac_safety_feat_interrupt(priv))
5825                 return IRQ_HANDLED;
5826
5827         /* To handle Common interrupts */
5828         stmmac_common_interrupt(priv);
5829
5830         /* To handle DMA interrupts */
5831         stmmac_dma_interrupt(priv);
5832
5833         return IRQ_HANDLED;
5834 }
5835
5836 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5837 {
5838         struct net_device *dev = (struct net_device *)dev_id;
5839         struct stmmac_priv *priv = netdev_priv(dev);
5840
5841         if (unlikely(!dev)) {
5842                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5843                 return IRQ_NONE;
5844         }
5845
5846         /* Check if adapter is up */
5847         if (test_bit(STMMAC_DOWN, &priv->state))
5848                 return IRQ_HANDLED;
5849
5850         /* To handle Common interrupts */
5851         stmmac_common_interrupt(priv);
5852
5853         return IRQ_HANDLED;
5854 }
5855
5856 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5857 {
5858         struct net_device *dev = (struct net_device *)dev_id;
5859         struct stmmac_priv *priv = netdev_priv(dev);
5860
5861         if (unlikely(!dev)) {
5862                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5863                 return IRQ_NONE;
5864         }
5865
5866         /* Check if adapter is up */
5867         if (test_bit(STMMAC_DOWN, &priv->state))
5868                 return IRQ_HANDLED;
5869
5870         /* Check if a fatal error happened */
5871         stmmac_safety_feat_interrupt(priv);
5872
5873         return IRQ_HANDLED;
5874 }
5875
5876 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5877 {
5878         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5879         struct stmmac_dma_conf *dma_conf;
5880         int chan = tx_q->queue_index;
5881         struct stmmac_priv *priv;
5882         int status;
5883
5884         dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5885         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5886
5887         if (unlikely(!data)) {
5888                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5889                 return IRQ_NONE;
5890         }
5891
5892         /* Check if adapter is up */
5893         if (test_bit(STMMAC_DOWN, &priv->state))
5894                 return IRQ_HANDLED;
5895
5896         status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5897
5898         if (unlikely(status & tx_hard_error_bump_tc)) {
5899                 /* Try to bump up the dma threshold on this failure */
5900                 stmmac_bump_dma_threshold(priv, chan);
5901         } else if (unlikely(status == tx_hard_error)) {
5902                 stmmac_tx_err(priv, chan);
5903         }
5904
5905         return IRQ_HANDLED;
5906 }
5907
5908 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5909 {
5910         struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5911         struct stmmac_dma_conf *dma_conf;
5912         int chan = rx_q->queue_index;
5913         struct stmmac_priv *priv;
5914
5915         dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5916         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5917
5918         if (unlikely(!data)) {
5919                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5920                 return IRQ_NONE;
5921         }
5922
5923         /* Check if adapter is up */
5924         if (test_bit(STMMAC_DOWN, &priv->state))
5925                 return IRQ_HANDLED;
5926
5927         stmmac_napi_check(priv, chan, DMA_DIR_RX);
5928
5929         return IRQ_HANDLED;
5930 }
5931
5932 #ifdef CONFIG_NET_POLL_CONTROLLER
5933 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5934  * to allow network I/O with interrupts disabled.
5935  */
5936 static void stmmac_poll_controller(struct net_device *dev)
5937 {
5938         struct stmmac_priv *priv = netdev_priv(dev);
5939         int i;
5940
5941         /* If adapter is down, do nothing */
5942         if (test_bit(STMMAC_DOWN, &priv->state))
5943                 return;
5944
5945         if (priv->plat->multi_msi_en) {
5946                 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5947                         stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
5948
5949                 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5950                         stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
5951         } else {
5952                 disable_irq(dev->irq);
5953                 stmmac_interrupt(dev->irq, dev);
5954                 enable_irq(dev->irq);
5955         }
5956 }
5957 #endif
5958
5959 /**
5960  *  stmmac_ioctl - Entry point for the Ioctl
5961  *  @dev: Device pointer.
5962  *  @rq: An IOCTL-specific structure that can contain a pointer to
5963  *  a proprietary structure used to pass information to the driver.
5964  *  @cmd: IOCTL command
5965  *  Description:
5966  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5967  */
5968 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5969 {
5970         struct stmmac_priv *priv = netdev_priv(dev);
5971         int ret = -EOPNOTSUPP;
5972
5973         if (!netif_running(dev))
5974                 return -EINVAL;
5975
5976         switch (cmd) {
5977         case SIOCGMIIPHY:
5978         case SIOCGMIIREG:
5979         case SIOCSMIIREG:
5980                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5981                 break;
5982         case SIOCSHWTSTAMP:
5983                 ret = stmmac_hwtstamp_set(dev, rq);
5984                 break;
5985         case SIOCGHWTSTAMP:
5986                 ret = stmmac_hwtstamp_get(dev, rq);
5987                 break;
5988         default:
5989                 break;
5990         }
5991
5992         return ret;
5993 }
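/*
 * Userspace view of the SIOCSHWTSTAMP path handled above (minimal sketch
 * using the standard kernel UAPI, nothing driver-specific; the interface
 * name is illustrative):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);   // reaches stmmac_hwtstamp_set()
 */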
5994
5995 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5996                                     void *cb_priv)
5997 {
5998         struct stmmac_priv *priv = cb_priv;
5999         int ret = -EOPNOTSUPP;
6000
6001         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6002                 return ret;
6003
6004         __stmmac_disable_all_queues(priv);
6005
6006         switch (type) {
6007         case TC_SETUP_CLSU32:
6008                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6009                 break;
6010         case TC_SETUP_CLSFLOWER:
6011                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6012                 break;
6013         default:
6014                 break;
6015         }
6016
6017         stmmac_enable_all_queues(priv);
6018         return ret;
6019 }
6020
6021 static LIST_HEAD(stmmac_block_cb_list);
6022
6023 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6024                            void *type_data)
6025 {
6026         struct stmmac_priv *priv = netdev_priv(ndev);
6027
6028         switch (type) {
6029         case TC_QUERY_CAPS:
6030                 return stmmac_tc_query_caps(priv, priv, type_data);
6031         case TC_SETUP_BLOCK:
6032                 return flow_block_cb_setup_simple(type_data,
6033                                                   &stmmac_block_cb_list,
6034                                                   stmmac_setup_tc_block_cb,
6035                                                   priv, priv, true);
6036         case TC_SETUP_QDISC_CBS:
6037                 return stmmac_tc_setup_cbs(priv, priv, type_data);
6038         case TC_SETUP_QDISC_TAPRIO:
6039                 return stmmac_tc_setup_taprio(priv, priv, type_data);
6040         case TC_SETUP_QDISC_ETF:
6041                 return stmmac_tc_setup_etf(priv, priv, type_data);
6042         default:
6043                 return -EOPNOTSUPP;
6044         }
6045 }
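/*
 * These offloads are reached through the normal tc interfaces; for
 * example (handle and parameter values are illustrative only), a CBS
 * offload request on a TX queue arrives here as TC_SETUP_QDISC_CBS:
 *
 *	tc qdisc replace dev eth0 parent 100:2 cbs \
 *		idleslope 20000 sendslope -980000 \
 *		hicredit 30 locredit -1470 offload 1
 */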
6046
6047 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6048                                struct net_device *sb_dev)
6049 {
6050         int gso = skb_shinfo(skb)->gso_type;
6051
6052         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6053                 /*
6054                  * There is no way to determine the number of TSO/USO
6055                  * capable Queues. Let's always use Queue 0
6056                  * because if TSO/USO is supported then at least this
6057                  * one will be capable.
6058                  */
6059                 return 0;
6060         }
6061
6062         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6063 }
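/*
 * Example of the fallback path above (illustrative values): with
 * dev->real_num_tx_queues = 4, a non-GSO skb for which netdev_pick_tx()
 * returns 6 is transmitted on queue 6 % 4 = 2, while any TSO/USO skb is
 * pinned to queue 0 regardless of the hash.
 */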
6064
6065 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6066 {
6067         struct stmmac_priv *priv = netdev_priv(ndev);
6068         int ret = 0;
6069
6070         ret = pm_runtime_resume_and_get(priv->device);
6071         if (ret < 0)
6072                 return ret;
6073
6074         ret = eth_mac_addr(ndev, addr);
6075         if (ret)
6076                 goto set_mac_error;
6077
6078         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6079
6080 set_mac_error:
6081         pm_runtime_put(priv->device);
6082
6083         return ret;
6084 }
6085
6086 #ifdef CONFIG_DEBUG_FS
6087 static struct dentry *stmmac_fs_dir;
6088
6089 static void sysfs_display_ring(void *head, int size, int extend_desc,
6090                                struct seq_file *seq, dma_addr_t dma_phy_addr)
6091 {
6092         int i;
6093         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6094         struct dma_desc *p = (struct dma_desc *)head;
6095         dma_addr_t dma_addr;
6096
6097         for (i = 0; i < size; i++) {
6098                 if (extend_desc) {
6099                         dma_addr = dma_phy_addr + i * sizeof(*ep);
6100                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6101                                    i, &dma_addr,
6102                                    le32_to_cpu(ep->basic.des0),
6103                                    le32_to_cpu(ep->basic.des1),
6104                                    le32_to_cpu(ep->basic.des2),
6105                                    le32_to_cpu(ep->basic.des3));
6106                         ep++;
6107                 } else {
6108                         dma_addr = dma_phy_addr + i * sizeof(*p);
6109                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6110                                    i, &dma_addr,
6111                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6112                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6113                         p++;
6114                 }
6115                 seq_printf(seq, "\n");
6116         }
6117 }
6118
6119 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6120 {
6121         struct net_device *dev = seq->private;
6122         struct stmmac_priv *priv = netdev_priv(dev);
6123         u32 rx_count = priv->plat->rx_queues_to_use;
6124         u32 tx_count = priv->plat->tx_queues_to_use;
6125         u32 queue;
6126
6127         if ((dev->flags & IFF_UP) == 0)
6128                 return 0;
6129
6130         for (queue = 0; queue < rx_count; queue++) {
6131                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6132
6133                 seq_printf(seq, "RX Queue %d:\n", queue);
6134
6135                 if (priv->extend_desc) {
6136                         seq_printf(seq, "Extended descriptor ring:\n");
6137                         sysfs_display_ring((void *)rx_q->dma_erx,
6138                                            priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6139                 } else {
6140                         seq_printf(seq, "Descriptor ring:\n");
6141                         sysfs_display_ring((void *)rx_q->dma_rx,
6142                                            priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6143                 }
6144         }
6145
6146         for (queue = 0; queue < tx_count; queue++) {
6147                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6148
6149                 seq_printf(seq, "TX Queue %d:\n", queue);
6150
6151                 if (priv->extend_desc) {
6152                         seq_printf(seq, "Extended descriptor ring:\n");
6153                         sysfs_display_ring((void *)tx_q->dma_etx,
6154                                            priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6155                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6156                         seq_printf(seq, "Descriptor ring:\n");
6157                         sysfs_display_ring((void *)tx_q->dma_tx,
6158                                            priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6159                 }
6160         }
6161
6162         return 0;
6163 }
6164 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6165
6166 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6167 {
6168         struct net_device *dev = seq->private;
6169         struct stmmac_priv *priv = netdev_priv(dev);
6170
6171         if (!priv->hw_cap_support) {
6172                 seq_printf(seq, "DMA HW features not supported\n");
6173                 return 0;
6174         }
6175
6176         seq_printf(seq, "==============================\n");
6177         seq_printf(seq, "\tDMA HW features\n");
6178         seq_printf(seq, "==============================\n");
6179
6180         seq_printf(seq, "\t10/100 Mbps: %s\n",
6181                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6182         seq_printf(seq, "\t1000 Mbps: %s\n",
6183                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
6184         seq_printf(seq, "\tHalf duplex: %s\n",
6185                    (priv->dma_cap.half_duplex) ? "Y" : "N");
6186         seq_printf(seq, "\tHash Filter: %s\n",
6187                    (priv->dma_cap.hash_filter) ? "Y" : "N");
6188         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6189                    (priv->dma_cap.multi_addr) ? "Y" : "N");
6190         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6191                    (priv->dma_cap.pcs) ? "Y" : "N");
6192         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6193                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
6194         seq_printf(seq, "\tPMT Remote wake up: %s\n",
6195                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6196         seq_printf(seq, "\tPMT Magic Frame: %s\n",
6197                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6198         seq_printf(seq, "\tRMON module: %s\n",
6199                    (priv->dma_cap.rmon) ? "Y" : "N");
6200         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6201                    (priv->dma_cap.time_stamp) ? "Y" : "N");
6202         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6203                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
6204         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6205                    (priv->dma_cap.eee) ? "Y" : "N");
6206         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6207         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6208                    (priv->dma_cap.tx_coe) ? "Y" : "N");
6209         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6210                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6211                            (priv->dma_cap.rx_coe) ? "Y" : "N");
6212         } else {
6213                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6214                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6215                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6216                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6217         }
6218         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6219                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6220         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6221                    priv->dma_cap.number_rx_channel);
6222         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6223                    priv->dma_cap.number_tx_channel);
6224         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6225                    priv->dma_cap.number_rx_queues);
6226         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6227                    priv->dma_cap.number_tx_queues);
6228         seq_printf(seq, "\tEnhanced descriptors: %s\n",
6229                    (priv->dma_cap.enh_desc) ? "Y" : "N");
6230         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6231         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6232         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6233         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6234         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6235                    priv->dma_cap.pps_out_num);
6236         seq_printf(seq, "\tSafety Features: %s\n",
6237                    priv->dma_cap.asp ? "Y" : "N");
6238         seq_printf(seq, "\tFlexible RX Parser: %s\n",
6239                    priv->dma_cap.frpsel ? "Y" : "N");
6240         seq_printf(seq, "\tEnhanced Addressing: %d\n",
6241                    priv->dma_cap.host_dma_width);
6242         seq_printf(seq, "\tReceive Side Scaling: %s\n",
6243                    priv->dma_cap.rssen ? "Y" : "N");
6244         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6245                    priv->dma_cap.vlhash ? "Y" : "N");
6246         seq_printf(seq, "\tSplit Header: %s\n",
6247                    priv->dma_cap.sphen ? "Y" : "N");
6248         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6249                    priv->dma_cap.vlins ? "Y" : "N");
6250         seq_printf(seq, "\tDouble VLAN: %s\n",
6251                    priv->dma_cap.dvlan ? "Y" : "N");
6252         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6253                    priv->dma_cap.l3l4fnum);
6254         seq_printf(seq, "\tARP Offloading: %s\n",
6255                    priv->dma_cap.arpoffsel ? "Y" : "N");
6256         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6257                    priv->dma_cap.estsel ? "Y" : "N");
6258         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6259                    priv->dma_cap.fpesel ? "Y" : "N");
6260         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6261                    priv->dma_cap.tbssel ? "Y" : "N");
6262         return 0;
6263 }
6264 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6265
6266 /* Use network device events to rename debugfs file entries.
6267  */
6268 static int stmmac_device_event(struct notifier_block *unused,
6269                                unsigned long event, void *ptr)
6270 {
6271         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6272         struct stmmac_priv *priv = netdev_priv(dev);
6273
6274         if (dev->netdev_ops != &stmmac_netdev_ops)
6275                 goto done;
6276
6277         switch (event) {
6278         case NETDEV_CHANGENAME:
6279                 if (priv->dbgfs_dir)
6280                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6281                                                          priv->dbgfs_dir,
6282                                                          stmmac_fs_dir,
6283                                                          dev->name);
6284                 break;
6285         }
6286 done:
6287         return NOTIFY_DONE;
6288 }
6289
6290 static struct notifier_block stmmac_notifier = {
6291         .notifier_call = stmmac_device_event,
6292 };
6293
6294 static void stmmac_init_fs(struct net_device *dev)
6295 {
6296         struct stmmac_priv *priv = netdev_priv(dev);
6297
6298         rtnl_lock();
6299
6300         /* Create per netdev entries */
6301         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6302
6303         /* Entry to report DMA RX/TX rings */
6304         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6305                             &stmmac_rings_status_fops);
6306
6307         /* Entry to report the DMA HW features */
6308         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6309                             &stmmac_dma_cap_fops);
6310
6311         rtnl_unlock();
6312 }
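/*
 * The entries created above can be inspected from userspace once debugfs
 * is mounted; the directory name comes from stmmac_fs_dir created at
 * module init time (the path shown is the usual layout and may differ):
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */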
6313
6314 static void stmmac_exit_fs(struct net_device *dev)
6315 {
6316         struct stmmac_priv *priv = netdev_priv(dev);
6317
6318         debugfs_remove_recursive(priv->dbgfs_dir);
6319 }
6320 #endif /* CONFIG_DEBUG_FS */
6321
6322 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6323 {
6324         unsigned char *data = (unsigned char *)&vid_le;
6325         unsigned char data_byte = 0;
6326         u32 crc = ~0x0;
6327         u32 temp = 0;
6328         int i, bits;
6329
6330         bits = get_bitmask_order(VLAN_VID_MASK);
6331         for (i = 0; i < bits; i++) {
6332                 if ((i % 8) == 0)
6333                         data_byte = data[i / 8];
6334
6335                 temp = ((crc & 1) ^ data_byte) & 1;
6336                 crc >>= 1;
6337                 data_byte >>= 1;
6338
6339                 if (temp)
6340                         crc ^= 0xedb88320;
6341         }
6342
6343         return crc;
6344 }
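/*
 * How the CRC above is consumed (sketch; see stmmac_vlan_update() below):
 * the bit-reversed, inverted CRC is reduced to its top four bits, which
 * index one of the 16 positions of the hardware VLAN hash filter:
 *
 *	crc   = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;   // 0..15
 *	hash |= BIT(crc);                                        // one bit per VID
 */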
6345
6346 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6347 {
6348         u32 crc, hash = 0;
6349         __le16 pmatch = 0;
6350         int count = 0;
6351         u16 vid = 0;
6352
6353         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6354                 __le16 vid_le = cpu_to_le16(vid);
6355                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6356                 hash |= (1 << crc);
6357                 count++;
6358         }
6359
6360         if (!priv->dma_cap.vlhash) {
6361                 if (count > 2) /* VID = 0 always passes filter */
6362                         return -EOPNOTSUPP;
6363
6364                 pmatch = cpu_to_le16(vid);
6365                 hash = 0;
6366         }
6367
6368         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6369 }
6370
6371 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6372 {
6373         struct stmmac_priv *priv = netdev_priv(ndev);
6374         bool is_double = false;
6375         int ret;
6376
6377         ret = pm_runtime_resume_and_get(priv->device);
6378         if (ret < 0)
6379                 return ret;
6380
6381         if (be16_to_cpu(proto) == ETH_P_8021AD)
6382                 is_double = true;
6383
6384         set_bit(vid, priv->active_vlans);
6385         ret = stmmac_vlan_update(priv, is_double);
6386         if (ret) {
6387                 clear_bit(vid, priv->active_vlans);
6388                 goto err_pm_put;
6389         }
6390
6391         if (priv->hw->num_vlan) {
6392                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6393                 if (ret)
6394                         goto err_pm_put;
6395         }
6396 err_pm_put:
6397         pm_runtime_put(priv->device);
6398
6399         return ret;
6400 }
6401
6402 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6403 {
6404         struct stmmac_priv *priv = netdev_priv(ndev);
6405         bool is_double = false;
6406         int ret;
6407
6408         ret = pm_runtime_resume_and_get(priv->device);
6409         if (ret < 0)
6410                 return ret;
6411
6412         if (be16_to_cpu(proto) == ETH_P_8021AD)
6413                 is_double = true;
6414
6415         clear_bit(vid, priv->active_vlans);
6416
6417         if (priv->hw->num_vlan) {
6418                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6419                 if (ret)
6420                         goto del_vlan_error;
6421         }
6422
6423         ret = stmmac_vlan_update(priv, is_double);
6424
6425 del_vlan_error:
6426         pm_runtime_put(priv->device);
6427
6428         return ret;
6429 }
6430
6431 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6432 {
6433         struct stmmac_priv *priv = netdev_priv(dev);
6434
6435         switch (bpf->command) {
6436         case XDP_SETUP_PROG:
6437                 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6438         case XDP_SETUP_XSK_POOL:
6439                 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6440                                              bpf->xsk.queue_id);
6441         default:
6442                 return -EOPNOTSUPP;
6443         }
6444 }
6445
6446 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6447                            struct xdp_frame **frames, u32 flags)
6448 {
6449         struct stmmac_priv *priv = netdev_priv(dev);
6450         int cpu = smp_processor_id();
6451         struct netdev_queue *nq;
6452         int i, nxmit = 0;
6453         int queue;
6454
6455         if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6456                 return -ENETDOWN;
6457
6458         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6459                 return -EINVAL;
6460
6461         queue = stmmac_xdp_get_tx_queue(priv, cpu);
6462         nq = netdev_get_tx_queue(priv->dev, queue);
6463
6464         __netif_tx_lock(nq, cpu);
6465         /* Avoids TX time-out as we are sharing with slow path */
6466         txq_trans_cond_update(nq);
6467
6468         for (i = 0; i < num_frames; i++) {
6469                 int res;
6470
6471                 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6472                 if (res == STMMAC_XDP_CONSUMED)
6473                         break;
6474
6475                 nxmit++;
6476         }
6477
6478         if (flags & XDP_XMIT_FLUSH) {
6479                 stmmac_flush_tx_descriptors(priv, queue);
6480                 stmmac_tx_timer_arm(priv, queue);
6481         }
6482
6483         __netif_tx_unlock(nq);
6484
6485         return nxmit;
6486 }
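/*
 * Note on the return value (the generic .ndo_xdp_xmit contract as
 * understood here, not driver-specific behaviour): nxmit is the number of
 * frames queued to hardware; frames[nxmit..num_frames-1] were not
 * consumed and remain owned by the XDP core, which frees them.
 */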
6487
6488 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6489 {
6490         struct stmmac_channel *ch = &priv->channel[queue];
6491         unsigned long flags;
6492
6493         spin_lock_irqsave(&ch->lock, flags);
6494         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6495         spin_unlock_irqrestore(&ch->lock, flags);
6496
6497         stmmac_stop_rx_dma(priv, queue);
6498         __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6499 }
6500
6501 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6502 {
6503         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6504         struct stmmac_channel *ch = &priv->channel[queue];
6505         unsigned long flags;
6506         u32 buf_size;
6507         int ret;
6508
6509         ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6510         if (ret) {
6511                 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6512                 return;
6513         }
6514
6515         ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6516         if (ret) {
6517                 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6518                 netdev_err(priv->dev, "Failed to init RX desc.\n");
6519                 return;
6520         }
6521
6522         stmmac_reset_rx_queue(priv, queue);
6523         stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6524
6525         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6526                             rx_q->dma_rx_phy, rx_q->queue_index);
6527
6528         rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6529                              sizeof(struct dma_desc));
6530         stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6531                                rx_q->rx_tail_addr, rx_q->queue_index);
6532
6533         if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6534                 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6535                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6536                                       buf_size,
6537                                       rx_q->queue_index);
6538         } else {
6539                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6540                                       priv->dma_conf.dma_buf_sz,
6541                                       rx_q->queue_index);
6542         }
6543
6544         stmmac_start_rx_dma(priv, queue);
6545
6546         spin_lock_irqsave(&ch->lock, flags);
6547         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6548         spin_unlock_irqrestore(&ch->lock, flags);
6549 }
6550
6551 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6552 {
6553         struct stmmac_channel *ch = &priv->channel[queue];
6554         unsigned long flags;
6555
6556         spin_lock_irqsave(&ch->lock, flags);
6557         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6558         spin_unlock_irqrestore(&ch->lock, flags);
6559
6560         stmmac_stop_tx_dma(priv, queue);
6561         __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6562 }
6563
6564 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6565 {
6566         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6567         struct stmmac_channel *ch = &priv->channel[queue];
6568         unsigned long flags;
6569         int ret;
6570
6571         ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6572         if (ret) {
6573                 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6574                 return;
6575         }
6576
6577         ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6578         if (ret) {
6579                 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6580                 netdev_err(priv->dev, "Failed to init TX desc.\n");
6581                 return;
6582         }
6583
6584         stmmac_reset_tx_queue(priv, queue);
6585         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6586
6587         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6588                             tx_q->dma_tx_phy, tx_q->queue_index);
6589
6590         if (tx_q->tbs & STMMAC_TBS_AVAIL)
6591                 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6592
6593         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6594         stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6595                                tx_q->tx_tail_addr, tx_q->queue_index);
6596
6597         stmmac_start_tx_dma(priv, queue);
6598
6599         spin_lock_irqsave(&ch->lock, flags);
6600         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6601         spin_unlock_irqrestore(&ch->lock, flags);
6602 }
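/*
 * Typical pairing of the four queue helpers above when an AF_XDP buffer
 * pool is attached or detached at runtime (sketch of the assumed flow in
 * stmmac_xdp.c; the pool bookkeeping itself is omitted):
 *
 *	stmmac_disable_rx_queue(priv, queue);
 *	stmmac_disable_tx_queue(priv, queue);
 *	// attach or detach rx_q->xsk_pool / tx_q->xsk_pool here
 *	stmmac_enable_rx_queue(priv, queue);
 *	stmmac_enable_tx_queue(priv, queue);
 */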
6603
6604 void stmmac_xdp_release(struct net_device *dev)
6605 {
6606         struct stmmac_priv *priv = netdev_priv(dev);
6607         u32 chan;
6608
6609         /* Ensure tx function is not running */
6610         netif_tx_disable(dev);
6611
6612         /* Disable NAPI process */
6613         stmmac_disable_all_queues(priv);
6614
6615         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6616                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6617
6618         /* Free the IRQ lines */
6619         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6620
6621         /* Stop TX/RX DMA channels */
6622         stmmac_stop_all_dma(priv);
6623
6624         /* Release and free the Rx/Tx resources */
6625         free_dma_desc_resources(priv, &priv->dma_conf);
6626
6627         /* Disable the MAC Rx/Tx */
6628         stmmac_mac_set(priv, priv->ioaddr, false);
6629
6630         /* set trans_start so we don't get spurious
6631          * watchdogs during reset
6632          */
6633         netif_trans_update(dev);
6634         netif_carrier_off(dev);
6635 }
6636
6637 int stmmac_xdp_open(struct net_device *dev)
6638 {
6639         struct stmmac_priv *priv = netdev_priv(dev);
6640         u32 rx_cnt = priv->plat->rx_queues_to_use;
6641         u32 tx_cnt = priv->plat->tx_queues_to_use;
6642         u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6643         struct stmmac_rx_queue *rx_q;
6644         struct stmmac_tx_queue *tx_q;
6645         u32 buf_size;
6646         bool sph_en;
6647         u32 chan;
6648         int ret;
6649
6650         ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6651         if (ret < 0) {
6652                 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6653                            __func__);
6654                 goto dma_desc_error;
6655         }
6656
6657         ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6658         if (ret < 0) {
6659                 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6660                            __func__);
6661                 goto init_error;
6662         }
6663
6664         stmmac_reset_queues_param(priv);
6665
6666         /* DMA CSR Channel configuration */
6667         for (chan = 0; chan < dma_csr_ch; chan++) {
6668                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6669                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6670         }
6671
6672         /* Adjust Split header */
6673         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6674
6675         /* DMA RX Channel Configuration */
6676         for (chan = 0; chan < rx_cnt; chan++) {
6677                 rx_q = &priv->dma_conf.rx_queue[chan];
6678
6679                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6680                                     rx_q->dma_rx_phy, chan);
6681
6682                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6683                                      (rx_q->buf_alloc_num *
6684                                       sizeof(struct dma_desc));
6685                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6686                                        rx_q->rx_tail_addr, chan);
6687
6688                 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6689                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6690                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6691                                               buf_size,
6692                                               rx_q->queue_index);
6693                 } else {
6694                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6695                                               priv->dma_conf.dma_buf_sz,
6696                                               rx_q->queue_index);
6697                 }
6698
6699                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6700         }
6701
6702         /* DMA TX Channel Configuration */
6703         for (chan = 0; chan < tx_cnt; chan++) {
6704                 tx_q = &priv->dma_conf.tx_queue[chan];
6705
6706                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6707                                     tx_q->dma_tx_phy, chan);
6708
6709                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6710                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6711                                        tx_q->tx_tail_addr, chan);
6712
6713                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6714                 tx_q->txtimer.function = stmmac_tx_timer;
6715         }
6716
6717         /* Enable the MAC Rx/Tx */
6718         stmmac_mac_set(priv, priv->ioaddr, true);
6719
6720         /* Start Rx & Tx DMA Channels */
6721         stmmac_start_all_dma(priv);
6722
6723         ret = stmmac_request_irq(dev);
6724         if (ret)
6725                 goto irq_error;
6726
6727         /* Enable NAPI process */
6728         stmmac_enable_all_queues(priv);
6729         netif_carrier_on(dev);
6730         netif_tx_start_all_queues(dev);
6731         stmmac_enable_all_dma_irq(priv);
6732
6733         return 0;
6734
6735 irq_error:
6736         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6737                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6738
6739         stmmac_hw_teardown(dev);
6740 init_error:
6741         free_dma_desc_resources(priv, &priv->dma_conf);
6742 dma_desc_error:
6743         return ret;
6744 }
6745
6746 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6747 {
6748         struct stmmac_priv *priv = netdev_priv(dev);
6749         struct stmmac_rx_queue *rx_q;
6750         struct stmmac_tx_queue *tx_q;
6751         struct stmmac_channel *ch;
6752
6753         if (test_bit(STMMAC_DOWN, &priv->state) ||
6754             !netif_carrier_ok(priv->dev))
6755                 return -ENETDOWN;
6756
6757         if (!stmmac_xdp_is_enabled(priv))
6758                 return -EINVAL;
6759
6760         if (queue >= priv->plat->rx_queues_to_use ||
6761             queue >= priv->plat->tx_queues_to_use)
6762                 return -EINVAL;
6763
6764         rx_q = &priv->dma_conf.rx_queue[queue];
6765         tx_q = &priv->dma_conf.tx_queue[queue];
6766         ch = &priv->channel[queue];
6767
6768         if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6769                 return -EINVAL;
6770
6771         if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6772                 /* EQoS does not have per-DMA channel SW interrupt,
6773                  * so we schedule the RX NAPI straight away.
6774                  */
6775                 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6776                         __napi_schedule(&ch->rxtx_napi);
6777         }
6778
6779         return 0;
6780 }
6781
6782 static const struct net_device_ops stmmac_netdev_ops = {
6783         .ndo_open = stmmac_open,
6784         .ndo_start_xmit = stmmac_xmit,
6785         .ndo_stop = stmmac_release,
6786         .ndo_change_mtu = stmmac_change_mtu,
6787         .ndo_fix_features = stmmac_fix_features,
6788         .ndo_set_features = stmmac_set_features,
6789         .ndo_set_rx_mode = stmmac_set_rx_mode,
6790         .ndo_tx_timeout = stmmac_tx_timeout,
6791         .ndo_eth_ioctl = stmmac_ioctl,
6792         .ndo_setup_tc = stmmac_setup_tc,
6793         .ndo_select_queue = stmmac_select_queue,
6794 #ifdef CONFIG_NET_POLL_CONTROLLER
6795         .ndo_poll_controller = stmmac_poll_controller,
6796 #endif
6797         .ndo_set_mac_address = stmmac_set_mac_address,
6798         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6799         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6800         .ndo_bpf = stmmac_bpf,
6801         .ndo_xdp_xmit = stmmac_xdp_xmit,
6802         .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6803 };
6804
6805 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6806 {
6807         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6808                 return;
6809         if (test_bit(STMMAC_DOWN, &priv->state))
6810                 return;
6811
6812         netdev_err(priv->dev, "Reset adapter.\n");
6813
6814         rtnl_lock();
6815         netif_trans_update(priv->dev);
6816         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6817                 usleep_range(1000, 2000);
6818
6819         set_bit(STMMAC_DOWN, &priv->state);
6820         dev_close(priv->dev);
6821         dev_open(priv->dev, NULL);
6822         clear_bit(STMMAC_DOWN, &priv->state);
6823         clear_bit(STMMAC_RESETING, &priv->state);
6824         rtnl_unlock();
6825 }
6826
6827 static void stmmac_service_task(struct work_struct *work)
6828 {
6829         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6830                         service_task);
6831
6832         stmmac_reset_subtask(priv);
6833         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6834 }
6835
6836 /**
6837  *  stmmac_hw_init - Init the MAC device
6838  *  @priv: driver private structure
6839  *  Description: this function is to configure the MAC device according to
6840  *  some platform parameters or the HW capability register. It prepares the
6841  *  driver to use either ring or chain modes and to setup either enhanced or
6842  *  normal descriptors.
6843  */
6844 static int stmmac_hw_init(struct stmmac_priv *priv)
6845 {
6846         int ret;
6847
6848         /* dwmac-sun8i only works in chain mode */
6849         if (priv->plat->has_sun8i)
6850                 chain_mode = 1;
6851         priv->chain_mode = chain_mode;
6852
6853         /* Initialize HW Interface */
6854         ret = stmmac_hwif_init(priv);
6855         if (ret)
6856                 return ret;
6857
6858         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
6859         priv->hw_cap_support = stmmac_get_hw_features(priv);
6860         if (priv->hw_cap_support) {
6861                 dev_info(priv->device, "DMA HW capability register supported\n");
6862
6863                 /* We can override some gmac/dma configuration fields (e.g.
6864                  * enh_desc, tx_coe) passed through the
6865                  * platform with the values from the HW capability
6866                  * register (if supported).
6867                  */
6868                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6869                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6870                                 !priv->plat->use_phy_wol;
6871                 priv->hw->pmt = priv->plat->pmt;
6872                 if (priv->dma_cap.hash_tb_sz) {
6873                         priv->hw->multicast_filter_bins =
6874                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
6875                         priv->hw->mcast_bits_log2 =
6876                                         ilog2(priv->hw->multicast_filter_bins);
6877                 }
6878
6879                 /* TXCOE doesn't work in thresh DMA mode */
6880                 if (priv->plat->force_thresh_dma_mode)
6881                         priv->plat->tx_coe = 0;
6882                 else
6883                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
6884
6885                 /* For GMAC4, rx_coe comes from the HW capability register. */
6886                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6887
6888                 if (priv->dma_cap.rx_coe_type2)
6889                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6890                 else if (priv->dma_cap.rx_coe_type1)
6891                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6892
6893         } else {
6894                 dev_info(priv->device, "No HW DMA feature register supported\n");
6895         }
6896
6897         if (priv->plat->rx_coe) {
6898                 priv->hw->rx_csum = priv->plat->rx_coe;
6899                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6900                 if (priv->synopsys_id < DWMAC_CORE_4_00)
6901                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6902         }
6903         if (priv->plat->tx_coe)
6904                 dev_info(priv->device, "TX Checksum insertion supported\n");
6905
6906         if (priv->plat->pmt) {
6907                 dev_info(priv->device, "Wake-Up On Lan supported\n");
6908                 device_set_wakeup_capable(priv->device, 1);
6909         }
6910
6911         if (priv->dma_cap.tsoen)
6912                 dev_info(priv->device, "TSO supported\n");
6913
6914         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6915         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6916
6917         /* Run HW quirks, if any */
6918         if (priv->hwif_quirks) {
6919                 ret = priv->hwif_quirks(priv);
6920                 if (ret)
6921                         return ret;
6922         }
6923
6924         /* Rx Watchdog is available in cores newer than 3.40.
6925          * In some cases, for example on bugged HW, this feature
6926          * has to be disabled; this can be done by passing the
6927          * riwt_off field from the platform.
6928          */
6929         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6930             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6931                 priv->use_riwt = 1;
6932                 dev_info(priv->device,
6933                          "Enable RX Mitigation via HW Watchdog Timer\n");
6934         }
6935
6936         return 0;
6937 }
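/*
 * Worked example for the hash table sizing in stmmac_hw_init() above
 * (illustrative value): a HW capability field hash_tb_sz of 2 yields
 * multicast_filter_bins = BIT(2) << 5 = 128 bins and
 * mcast_bits_log2 = ilog2(128) = 7, i.e. a 7-bit hash index.
 */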
6938
6939 static void stmmac_napi_add(struct net_device *dev)
6940 {
6941         struct stmmac_priv *priv = netdev_priv(dev);
6942         u32 queue, maxq;
6943
6944         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6945
6946         for (queue = 0; queue < maxq; queue++) {
6947                 struct stmmac_channel *ch = &priv->channel[queue];
6948
6949                 ch->priv_data = priv;
6950                 ch->index = queue;
6951                 spin_lock_init(&ch->lock);
6952
6953                 if (queue < priv->plat->rx_queues_to_use) {
6954                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
6955                 }
6956                 if (queue < priv->plat->tx_queues_to_use) {
6957                         netif_napi_add_tx(dev, &ch->tx_napi,
6958                                           stmmac_napi_poll_tx);
6959                 }
6960                 if (queue < priv->plat->rx_queues_to_use &&
6961                     queue < priv->plat->tx_queues_to_use) {
6962                         netif_napi_add(dev, &ch->rxtx_napi,
6963                                        stmmac_napi_poll_rxtx);
6964                 }
6965         }
6966 }
6967
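/* Remove the NAPI contexts registered by stmmac_napi_add(). */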
6968 static void stmmac_napi_del(struct net_device *dev)
6969 {
6970         struct stmmac_priv *priv = netdev_priv(dev);
6971         u32 queue, maxq;
6972
6973         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6974
6975         for (queue = 0; queue < maxq; queue++) {
6976                 struct stmmac_channel *ch = &priv->channel[queue];
6977
6978                 if (queue < priv->plat->rx_queues_to_use)
6979                         netif_napi_del(&ch->rx_napi);
6980                 if (queue < priv->plat->tx_queues_to_use)
6981                         netif_napi_del(&ch->tx_napi);
6982                 if (queue < priv->plat->rx_queues_to_use &&
6983                     queue < priv->plat->tx_queues_to_use) {
6984                         netif_napi_del(&ch->rxtx_napi);
6985                 }
6986         }
6987 }
6988
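/* Reconfigure the number of RX/TX queues at run time (e.g. via ethtool
 * channel settings): tear down the interface and its NAPI contexts,
 * update the queue counts and the default RSS indirection table, then
 * bring the interface back up.
 */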
6989 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6990 {
6991         struct stmmac_priv *priv = netdev_priv(dev);
6992         int ret = 0, i;
6993
6994         if (netif_running(dev))
6995                 stmmac_release(dev);
6996
6997         stmmac_napi_del(dev);
6998
6999         priv->plat->rx_queues_to_use = rx_cnt;
7000         priv->plat->tx_queues_to_use = tx_cnt;
7001         if (!netif_is_rxfh_configured(dev))
7002                 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7003                         priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7004                                                                         rx_cnt);
7005
7006         stmmac_napi_add(dev);
7007
7008         if (netif_running(dev))
7009                 ret = stmmac_open(dev);
7010
7011         return ret;
7012 }
7013
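/* Resize the DMA descriptor rings at run time (e.g. via ethtool ring
 * parameters); the rings are re-allocated with the new sizes when the
 * interface is reopened.
 */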
7014 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7015 {
7016         struct stmmac_priv *priv = netdev_priv(dev);
7017         int ret = 0;
7018
7019         if (netif_running(dev))
7020                 stmmac_release(dev);
7021
7022         priv->dma_conf.dma_rx_size = rx_size;
7023         priv->dma_conf.dma_tx_size = tx_size;
7024
7025         if (netif_running(dev))
7026                 ret = stmmac_open(dev);
7027
7028         return ret;
7029 }
7030
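/* FPE link-partner handshake worker: polls the local/remote FPE state
 * for up to 20 iterations of 500 ms, re-sending verify mPackets until
 * both stations reach the ON state, at which point the MAC/MTL FPE
 * configuration is programmed.
 */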
7031 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7032 static void stmmac_fpe_lp_task(struct work_struct *work)
7033 {
7034         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7035                                                 fpe_task);
7036         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7037         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7038         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7039         bool *hs_enable = &fpe_cfg->hs_enable;
7040         bool *enable = &fpe_cfg->enable;
7041         int retries = 20;
7042
7043         while (retries-- > 0) {
7044                 /* Bail out immediately if FPE handshake is OFF */
7045                 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7046                         break;
7047
7048                 if (*lo_state == FPE_STATE_ENTERING_ON &&
7049                     *lp_state == FPE_STATE_ENTERING_ON) {
7050                         stmmac_fpe_configure(priv, priv->ioaddr,
7051                                              priv->plat->tx_queues_to_use,
7052                                              priv->plat->rx_queues_to_use,
7053                                              *enable);
7054
7055                         netdev_info(priv->dev, "configured FPE\n");
7056
7057                         *lo_state = FPE_STATE_ON;
7058                         *lp_state = FPE_STATE_ON;
7059                         netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7060                         break;
7061                 }
7062
7063                 if ((*lo_state == FPE_STATE_CAPABLE ||
7064                      *lo_state == FPE_STATE_ENTERING_ON) &&
7065                      *lp_state != FPE_STATE_ON) {
7066                         netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7067                                     *lo_state, *lp_state);
7068                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7069                                                 MPACKET_VERIFY);
7070                 }
7071                 /* Sleep then retry */
7072                 msleep(500);
7073         }
7074
7075         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7076 }
7077
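/* Start or stop the FPE verification handshake; enabling it sends an
 * initial verify mPacket, the rest of the exchange is driven by
 * stmmac_fpe_lp_task().
 */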
7078 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7079 {
7080         if (priv->plat->fpe_cfg->hs_enable != enable) {
7081                 if (enable) {
7082                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7083                                                 MPACKET_VERIFY);
7084                 } else {
7085                         priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7086                         priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7087                 }
7088
7089                 priv->plat->fpe_cfg->hs_enable = enable;
7090         }
7091 }
7092
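/* XDP RX metadata hook: lets BPF programs read the HW RX timestamp of
 * the current frame through the xdp_metadata_ops registered below.
 */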
7093 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7094 {
7095         const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7096         struct dma_desc *desc_contains_ts = ctx->desc;
7097         struct stmmac_priv *priv = ctx->priv;
7098         struct dma_desc *ndesc = ctx->ndesc;
7099         struct dma_desc *desc = ctx->desc;
7100         u64 ns = 0;
7101
7102         if (!priv->hwts_rx_en)
7103                 return -ENODATA;
7104
7105         /* For GMAC4, the valid timestamp is from CTX next desc. */
7106         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7107                 desc_contains_ts = ndesc;
7108
7109         /* Check if timestamp is available */
7110         if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7111                 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7112                 ns -= priv->plat->cdc_error_adj;
7113                 *timestamp = ns_to_ktime(ns);
7114                 return 0;
7115         }
7116
7117         return -ENODATA;
7118 }
7119
7120 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7121         .xmo_rx_timestamp               = stmmac_xdp_rx_timestamp,
7122 };
7123
7124 /**
7125  * stmmac_dvr_probe
7126  * @device: device pointer
7127  * @plat_dat: platform data pointer
7128  * @res: stmmac resource pointer
7129  * Description: this is the main probe function; it allocates the
7130  * net_device via alloc_etherdev and sets up the private structure.
7131  * Return:
7132  * returns 0 on success, otherwise errno.
7133  */
7134 int stmmac_dvr_probe(struct device *device,
7135                      struct plat_stmmacenet_data *plat_dat,
7136                      struct stmmac_resources *res)
7137 {
7138         struct net_device *ndev = NULL;
7139         struct stmmac_priv *priv;
7140         u32 rxq;
7141         int i, ret = 0;
7142
7143         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7144                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7145         if (!ndev)
7146                 return -ENOMEM;
7147
7148         SET_NETDEV_DEV(ndev, device);
7149
7150         priv = netdev_priv(ndev);
7151         priv->device = device;
7152         priv->dev = ndev;
7153
7154         stmmac_set_ethtool_ops(ndev);
7155         priv->pause = pause;
7156         priv->plat = plat_dat;
7157         priv->ioaddr = res->addr;
7158         priv->dev->base_addr = (unsigned long)res->addr;
7159         priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
7160
7161         priv->dev->irq = res->irq;
7162         priv->wol_irq = res->wol_irq;
7163         priv->lpi_irq = res->lpi_irq;
7164         priv->sfty_ce_irq = res->sfty_ce_irq;
7165         priv->sfty_ue_irq = res->sfty_ue_irq;
7166         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7167                 priv->rx_irq[i] = res->rx_irq[i];
7168         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7169                 priv->tx_irq[i] = res->tx_irq[i];
7170
7171         if (!is_zero_ether_addr(res->mac))
7172                 eth_hw_addr_set(priv->dev, res->mac);
7173
7174         dev_set_drvdata(device, priv->dev);
7175
7176         /* Verify driver arguments */
7177         stmmac_verify_args();
7178
7179         priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7180         if (!priv->af_xdp_zc_qps)
7181                 return -ENOMEM;
7182
7183         /* Allocate workqueue */
7184         priv->wq = create_singlethread_workqueue("stmmac_wq");
7185         if (!priv->wq) {
7186                 dev_err(priv->device, "failed to create workqueue\n");
7187                 ret = -ENOMEM;
7188                 goto error_wq_init;
7189         }
7190
7191         INIT_WORK(&priv->service_task, stmmac_service_task);
7192
7193         /* Initialize Link Partner FPE workqueue */
7194         INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7195
7196         /* Override with kernel parameters if supplied (XXX CRS XXX:
7197          * this needs to handle multiple instances).
7198          */
7199         if ((phyaddr >= 0) && (phyaddr <= 31))
7200                 priv->plat->phy_addr = phyaddr;
7201
7202         if (priv->plat->stmmac_rst) {
7203                 ret = reset_control_assert(priv->plat->stmmac_rst);
7204                 reset_control_deassert(priv->plat->stmmac_rst);
7205                 /* Some reset controllers only provide a reset callback
7206                  * instead of an assert + deassert callback pair.
7207                  */
7208                 if (ret == -ENOTSUPP)
7209                         reset_control_reset(priv->plat->stmmac_rst);
7210         }
7211
7212         ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7213         if (ret == -ENOTSUPP)
7214                 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7215                         ERR_PTR(ret));
7216
7217         /* Init MAC and get the capabilities */
7218         ret = stmmac_hw_init(priv);
7219         if (ret)
7220                 goto error_hw_init;
7221
7222         /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7223          */
7224         if (priv->synopsys_id < DWMAC_CORE_5_20)
7225                 priv->plat->dma_cfg->dche = false;
7226
7227         stmmac_check_ether_addr(priv);
7228
7229         ndev->netdev_ops = &stmmac_netdev_ops;
7230
7231         ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7232
7233         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7234                             NETIF_F_RXCSUM;
7235         ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7236                              NETDEV_XDP_ACT_XSK_ZEROCOPY;
7237
7238         ret = stmmac_tc_init(priv, priv);
7239         if (!ret) {
7240                 ndev->hw_features |= NETIF_F_HW_TC;
7241         }
7242
7243         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7244                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7245                 if (priv->plat->has_gmac4)
7246                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7247                 priv->tso = true;
7248                 dev_info(priv->device, "TSO feature enabled\n");
7249         }
7250
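        /* Split Header (SPH) lets the HW place packet headers and payload
         * in separate buffers; advertise GRO when SPH is available and not
         * disabled by the platform.
         */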
7251         if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
7252                 ndev->hw_features |= NETIF_F_GRO;
7253                 priv->sph_cap = true;
7254                 priv->sph = priv->sph_cap;
7255                 dev_info(priv->device, "SPH feature enabled\n");
7256         }
7257
7258         /* Ideally our host DMA address width is the same as for the
7259          * device. However, it may differ and then we have to use our
7260          * host DMA width for allocation and the device DMA width for
7261          * register handling.
7262          */
7263         if (priv->plat->host_dma_width)
7264                 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7265         else
7266                 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7267
7268         if (priv->dma_cap.host_dma_width) {
7269                 ret = dma_set_mask_and_coherent(device,
7270                                 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7271                 if (!ret) {
7272                         dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7273                                  priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7274
7275                         /*
7276                          * If more than 32 bits can be addressed, make sure to
7277                          * enable enhanced addressing mode.
7278                          */
7279                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7280                                 priv->plat->dma_cfg->eame = true;
7281                 } else {
7282                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7283                         if (ret) {
7284                                 dev_err(priv->device, "Failed to set DMA Mask\n");
7285                                 goto error_hw_init;
7286                         }
7287
7288                         priv->dma_cap.host_dma_width = 32;
7289                 }
7290         }
7291
7292         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7293         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7294 #ifdef STMMAC_VLAN_TAG_USED
7295         /* Both mac100 and gmac support receive VLAN tag detection */
7296         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7297         if (priv->dma_cap.vlhash) {
7298                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7299                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7300         }
7301         if (priv->dma_cap.vlins) {
7302                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7303                 if (priv->dma_cap.dvlan)
7304                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7305         }
7306 #endif
7307         priv->msg_enable = netif_msg_init(debug, default_msg_level);
7308
7309         /* Initialize RSS */
7310         rxq = priv->plat->rx_queues_to_use;
7311         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7312         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7313                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7314
7315         if (priv->dma_cap.rssen && priv->plat->rss_en)
7316                 ndev->features |= NETIF_F_RXHASH;
7317
7318         ndev->vlan_features |= ndev->features;
7319         /* TSO doesn't work on VLANs yet */
7320         ndev->vlan_features &= ~NETIF_F_TSO;
7321
7322         /* MTU range: 46 - hw-specific max */
7323         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7324         if (priv->plat->has_xgmac)
7325                 ndev->max_mtu = XGMAC_JUMBO_LEN;
7326         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7327                 ndev->max_mtu = JUMBO_LEN;
7328         else
7329                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7330         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7331          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7332          */
7333         if ((priv->plat->maxmtu < ndev->max_mtu) &&
7334             (priv->plat->maxmtu >= ndev->min_mtu))
7335                 ndev->max_mtu = priv->plat->maxmtu;
7336         else if (priv->plat->maxmtu < ndev->min_mtu)
7337                 dev_warn(priv->device,
7338                          "%s: warning: maxmtu has an invalid value (%d)\n",
7339                          __func__, priv->plat->maxmtu);
7340
7341         if (flow_ctrl)
7342                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
7343
7344         ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7345
7346         /* Setup channels NAPI */
7347         stmmac_napi_add(ndev);
7348
7349         mutex_init(&priv->lock);
7350
7351         /* If a specific clk_csr value is passed from the platform,
7352          * the CSR Clock Range selection is fixed and cannot be
7353          * changed at run-time. Otherwise the driver will try to
7354          * set the MDC clock dynamically according to the actual
7355          * CSR clock input.
7356          */
7357         if (priv->plat->clk_csr >= 0)
7358                 priv->clk_csr = priv->plat->clk_csr;
7359         else
7360                 stmmac_clk_csr_set(priv);
7361
7362         stmmac_check_pcs_mode(priv);
7363
7364         pm_runtime_get_noresume(device);
7365         pm_runtime_set_active(device);
7366         if (!pm_runtime_enabled(device))
7367                 pm_runtime_enable(device);
7368
7369         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7370             priv->hw->pcs != STMMAC_PCS_RTBI) {
7371                 /* MDIO bus Registration */
7372                 ret = stmmac_mdio_register(ndev);
7373                 if (ret < 0) {
7374                         dev_err_probe(priv->device, ret,
7375                                       "%s: MDIO bus (id: %d) registration failed\n",
7376                                       __func__, priv->plat->bus_id);
7377                         goto error_mdio_register;
7378                 }
7379         }
7380
7381         if (priv->plat->speed_mode_2500)
7382                 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7383
7384         if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7385                 ret = stmmac_xpcs_setup(priv->mii);
7386                 if (ret)
7387                         goto error_xpcs_setup;
7388         }
7389
7390         ret = stmmac_phy_setup(priv);
7391         if (ret) {
7392                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7393                 goto error_phy_setup;
7394         }
7395
7396         ret = register_netdev(ndev);
7397         if (ret) {
7398                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7399                         __func__, ret);
7400                 goto error_netdev_register;
7401         }
7402
7403 #ifdef CONFIG_DEBUG_FS
7404         stmmac_init_fs(ndev);
7405 #endif
7406
7407         if (priv->plat->dump_debug_regs)
7408                 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7409
7410         /* Let pm_runtime_put() disable the clocks.
7411          * If CONFIG_PM is not enabled, the clocks will stay powered.
7412          */
7413         pm_runtime_put(device);
7414
7415         return ret;
7416
7417 error_netdev_register:
7418         phylink_destroy(priv->phylink);
7419 error_xpcs_setup:
7420 error_phy_setup:
7421         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7422             priv->hw->pcs != STMMAC_PCS_RTBI)
7423                 stmmac_mdio_unregister(ndev);
7424 error_mdio_register:
7425         stmmac_napi_del(ndev);
7426 error_hw_init:
7427         destroy_workqueue(priv->wq);
7428 error_wq_init:
7429         bitmap_free(priv->af_xdp_zc_qps);
7430
7431         return ret;
7432 }
7433 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7434
7435 /**
7436  * stmmac_dvr_remove
7437  * @dev: device pointer
7438  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7439  * changes the link status and releases the DMA descriptor rings.
7440  */
7441 void stmmac_dvr_remove(struct device *dev)
7442 {
7443         struct net_device *ndev = dev_get_drvdata(dev);
7444         struct stmmac_priv *priv = netdev_priv(ndev);
7445
7446         netdev_info(priv->dev, "%s: removing driver\n", __func__);
7447
7448         pm_runtime_get_sync(dev);
7449
7450         stmmac_stop_all_dma(priv);
7451         stmmac_mac_set(priv, priv->ioaddr, false);
7452         netif_carrier_off(ndev);
7453         unregister_netdev(ndev);
7454
7455         /* Serdes power down needs to happen after the VLAN filter
7456          * is deleted, which is triggered by unregister_netdev().
7457          */
7458         if (priv->plat->serdes_powerdown)
7459                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7460
7461 #ifdef CONFIG_DEBUG_FS
7462         stmmac_exit_fs(ndev);
7463 #endif
7464         phylink_destroy(priv->phylink);
7465         if (priv->plat->stmmac_rst)
7466                 reset_control_assert(priv->plat->stmmac_rst);
7467         reset_control_assert(priv->plat->stmmac_ahb_rst);
7468         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7469             priv->hw->pcs != STMMAC_PCS_RTBI)
7470                 stmmac_mdio_unregister(ndev);
7471         destroy_workqueue(priv->wq);
7472         mutex_destroy(&priv->lock);
7473         bitmap_free(priv->af_xdp_zc_qps);
7474
7475         pm_runtime_disable(dev);
7476         pm_runtime_put_noidle(dev);
7477 }
7478 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7479
7480 /**
7481  * stmmac_suspend - suspend callback
7482  * @dev: device pointer
7483  * Description: this function suspends the device; it is called by the
7484  * platform driver to stop the network queues, program the PMT register
7485  * (for WoL) and release the driver resources.
7486  */
7487 int stmmac_suspend(struct device *dev)
7488 {
7489         struct net_device *ndev = dev_get_drvdata(dev);
7490         struct stmmac_priv *priv = netdev_priv(ndev);
7491         u32 chan;
7492
7493         if (!ndev || !netif_running(ndev))
7494                 return 0;
7495
7496         mutex_lock(&priv->lock);
7497
7498         netif_device_detach(ndev);
7499
7500         stmmac_disable_all_queues(priv);
7501
7502         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7503                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7504
7505         if (priv->eee_enabled) {
7506                 priv->tx_path_in_lpi_mode = false;
7507                 del_timer_sync(&priv->eee_ctrl_timer);
7508         }
7509
7510         /* Stop TX/RX DMA */
7511         stmmac_stop_all_dma(priv);
7512
7513         if (priv->plat->serdes_powerdown)
7514                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7515
7516         /* Enable Power down mode by programming the PMT regs */
7517         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7518                 stmmac_pmt(priv, priv->hw, priv->wolopts);
7519                 priv->irq_wake = 1;
7520         } else {
7521                 stmmac_mac_set(priv, priv->ioaddr, false);
7522                 pinctrl_pm_select_sleep_state(priv->device);
7523         }
7524
7525         mutex_unlock(&priv->lock);
7526
7527         rtnl_lock();
7528         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7529                 phylink_suspend(priv->phylink, true);
7530         } else {
7531                 if (device_may_wakeup(priv->device))
7532                         phylink_speed_down(priv->phylink, false);
7533                 phylink_suspend(priv->phylink, false);
7534         }
7535         rtnl_unlock();
7536
7537         if (priv->dma_cap.fpesel) {
7538                 /* Disable FPE */
7539                 stmmac_fpe_configure(priv, priv->ioaddr,
7540                                      priv->plat->tx_queues_to_use,
7541                                      priv->plat->rx_queues_to_use, false);
7542
7543                 stmmac_fpe_handshake(priv, false);
7544                 stmmac_fpe_stop_wq(priv);
7545         }
7546
7547         priv->speed = SPEED_UNKNOWN;
7548         return 0;
7549 }
7550 EXPORT_SYMBOL_GPL(stmmac_suspend);
7551
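/* Reset the software ring indices; used when the rings are about to be
 * reprogrammed from scratch (e.g. on resume).
 */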
7552 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7553 {
7554         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7555
7556         rx_q->cur_rx = 0;
7557         rx_q->dirty_rx = 0;
7558 }
7559
7560 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7561 {
7562         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7563
7564         tx_q->cur_tx = 0;
7565         tx_q->dirty_tx = 0;
7566         tx_q->mss = 0;
7567
7568         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7569 }
7570
7571 /**
7572  * stmmac_reset_queues_param - reset queue parameters
7573  * @priv: device pointer
7574  */
7575 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7576 {
7577         u32 rx_cnt = priv->plat->rx_queues_to_use;
7578         u32 tx_cnt = priv->plat->tx_queues_to_use;
7579         u32 queue;
7580
7581         for (queue = 0; queue < rx_cnt; queue++)
7582                 stmmac_reset_rx_queue(priv, queue);
7583
7584         for (queue = 0; queue < tx_cnt; queue++)
7585                 stmmac_reset_tx_queue(priv, queue);
7586 }
7587
7588 /**
7589  * stmmac_resume - resume callback
7590  * @dev: device pointer
7591  * Description: on resume this function is invoked to set up the DMA and the
7592  * core in a usable state.
7593  */
7594 int stmmac_resume(struct device *dev)
7595 {
7596         struct net_device *ndev = dev_get_drvdata(dev);
7597         struct stmmac_priv *priv = netdev_priv(ndev);
7598         int ret;
7599
7600         if (!netif_running(ndev))
7601                 return 0;
7602
7603         /* The Power Down bit in the PM register is cleared
7604          * automatically as soon as a magic packet or a Wake-up frame
7605          * is received. It is still better to clear this bit manually,
7606          * because it can cause problems when resuming from other
7607          * devices (e.g. a serial console).
7608          */
7609         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7610                 mutex_lock(&priv->lock);
7611                 stmmac_pmt(priv, priv->hw, 0);
7612                 mutex_unlock(&priv->lock);
7613                 priv->irq_wake = 0;
7614         } else {
7615                 pinctrl_pm_select_default_state(priv->device);
7616                 /* reset the phy so that it's ready */
7617                 if (priv->mii)
7618                         stmmac_mdio_reset(priv->mii);
7619         }
7620
7621         if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
7622                 ret = priv->plat->serdes_powerup(ndev,
7623                                                  priv->plat->bsp_priv);
7624
7625                 if (ret < 0)
7626                         return ret;
7627         }
7628
7629         rtnl_lock();
7630         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7631                 phylink_resume(priv->phylink);
7632         } else {
7633                 phylink_resume(priv->phylink);
7634                 if (device_may_wakeup(priv->device))
7635                         phylink_speed_up(priv->phylink);
7636         }
7637         rtnl_unlock();
7638
7639         rtnl_lock();
7640         mutex_lock(&priv->lock);
7641
7642         stmmac_reset_queues_param(priv);
7643
7644         stmmac_free_tx_skbufs(priv);
7645         stmmac_clear_descriptors(priv, &priv->dma_conf);
7646
7647         stmmac_hw_setup(ndev, false);
7648         stmmac_init_coalesce(priv);
7649         stmmac_set_rx_mode(ndev);
7650
7651         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7652
7653         stmmac_enable_all_queues(priv);
7654         stmmac_enable_all_dma_irq(priv);
7655
7656         mutex_unlock(&priv->lock);
7657         rtnl_unlock();
7658
7659         netif_device_attach(ndev);
7660
7661         return 0;
7662 }
7663 EXPORT_SYMBOL_GPL(stmmac_resume);
7664
7665 #ifndef MODULE
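/* Parse the built-in "stmmaceth=" kernel command line, e.g.
 * stmmaceth=watchdog:4000,debug:16 (illustrative values); each option
 * maps to one of the module parameters declared above.
 */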
7666 static int __init stmmac_cmdline_opt(char *str)
7667 {
7668         char *opt;
7669
7670         if (!str || !*str)
7671                 return 1;
7672         while ((opt = strsep(&str, ",")) != NULL) {
7673                 if (!strncmp(opt, "debug:", 6)) {
7674                         if (kstrtoint(opt + 6, 0, &debug))
7675                                 goto err;
7676                 } else if (!strncmp(opt, "phyaddr:", 8)) {
7677                         if (kstrtoint(opt + 8, 0, &phyaddr))
7678                                 goto err;
7679                 } else if (!strncmp(opt, "buf_sz:", 7)) {
7680                         if (kstrtoint(opt + 7, 0, &buf_sz))
7681                                 goto err;
7682                 } else if (!strncmp(opt, "tc:", 3)) {
7683                         if (kstrtoint(opt + 3, 0, &tc))
7684                                 goto err;
7685                 } else if (!strncmp(opt, "watchdog:", 9)) {
7686                         if (kstrtoint(opt + 9, 0, &watchdog))
7687                                 goto err;
7688                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7689                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
7690                                 goto err;
7691                 } else if (!strncmp(opt, "pause:", 6)) {
7692                         if (kstrtoint(opt + 6, 0, &pause))
7693                                 goto err;
7694                 } else if (!strncmp(opt, "eee_timer:", 10)) {
7695                         if (kstrtoint(opt + 10, 0, &eee_timer))
7696                                 goto err;
7697                 } else if (!strncmp(opt, "chain_mode:", 11)) {
7698                         if (kstrtoint(opt + 11, 0, &chain_mode))
7699                                 goto err;
7700                 }
7701         }
7702         return 1;
7703
7704 err:
7705         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7706         return 1;
7707 }
7708
7709 __setup("stmmaceth=", stmmac_cmdline_opt);
7710 #endif /* MODULE */
7711
7712 static int __init stmmac_init(void)
7713 {
7714 #ifdef CONFIG_DEBUG_FS
7715         /* Create debugfs main directory if it doesn't exist yet */
7716         if (!stmmac_fs_dir)
7717                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7718         register_netdevice_notifier(&stmmac_notifier);
7719 #endif
7720
7721         return 0;
7722 }
7723
7724 static void __exit stmmac_exit(void)
7725 {
7726 #ifdef CONFIG_DEBUG_FS
7727         unregister_netdevice_notifier(&stmmac_notifier);
7728         debugfs_remove_recursive(stmmac_fs_dir);
7729 #endif
7730 }
7731
7732 module_init(stmmac_init)
7733 module_exit(stmmac_exit)
7734
7735 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7736 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7737 MODULE_LICENSE("GPL");