net: stmmac: dwxgmac2 DMA operations (Synopsys XGMAC 2.10)
[platform/kernel/linux-starfive.git] / drivers / net / ethernet / stmicro / stmmac / dwxgmac2_dma.c
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6
7 #include <linux/iopoll.h>
8 #include "stmmac.h"
9 #include "dwxgmac2.h"
10
11 static int dwxgmac2_dma_reset(void __iomem *ioaddr)
12 {
13         u32 value = readl(ioaddr + XGMAC_DMA_MODE);
14
15         /* DMA SW reset */
16         writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
17
18         return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
19                                   !(value & XGMAC_SWR), 0, 100000);
20 }
21
22 static void dwxgmac2_dma_init(void __iomem *ioaddr,
23                               struct stmmac_dma_cfg *dma_cfg, int atds)
24 {
25         u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
26
27         if (dma_cfg->aal)
28                 value |= XGMAC_AAL;
29
30         if (dma_cfg->eame)
31                 value |= XGMAC_EAME;
32
33         writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
34 }
35
36 static void dwxgmac2_dma_init_chan(struct stmmac_priv *priv,
37                                    void __iomem *ioaddr,
38                                    struct stmmac_dma_cfg *dma_cfg, u32 chan)
39 {
40         u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
41
42         if (dma_cfg->pblx8)
43                 value |= XGMAC_PBLx8;
44
45         writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
46         writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
47 }
48
49 static void dwxgmac2_dma_init_rx_chan(struct stmmac_priv *priv,
50                                       void __iomem *ioaddr,
51                                       struct stmmac_dma_cfg *dma_cfg,
52                                       dma_addr_t phy, u32 chan)
53 {
54         u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
55         u32 value;
56
57         value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
58         value &= ~XGMAC_RxPBL;
59         value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
60         writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
61
62         writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
63         writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
64 }
65
66 static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
67                                       void __iomem *ioaddr,
68                                       struct stmmac_dma_cfg *dma_cfg,
69                                       dma_addr_t phy, u32 chan)
70 {
71         u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
72         u32 value;
73
74         value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
75         value &= ~XGMAC_TxPBL;
76         value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
77         value |= XGMAC_OSP;
78         writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
79
80         writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
81         writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
82 }
83
84 static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
85 {
86         u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
87         int i;
88
89         if (axi->axi_lpi_en)
90                 value |= XGMAC_EN_LPI;
91         if (axi->axi_xit_frm)
92                 value |= XGMAC_LPI_XIT_PKT;
93
94         value &= ~XGMAC_WR_OSR_LMT;
95         value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
96                 XGMAC_WR_OSR_LMT;
97
98         value &= ~XGMAC_RD_OSR_LMT;
99         value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
100                 XGMAC_RD_OSR_LMT;
101
102         if (!axi->axi_fb)
103                 value |= XGMAC_UNDEF;
104
105         value &= ~XGMAC_BLEN;
106         for (i = 0; i < AXI_BLEN; i++) {
107                 switch (axi->axi_blen[i]) {
108                 case 256:
109                         value |= XGMAC_BLEN256;
110                         break;
111                 case 128:
112                         value |= XGMAC_BLEN128;
113                         break;
114                 case 64:
115                         value |= XGMAC_BLEN64;
116                         break;
117                 case 32:
118                         value |= XGMAC_BLEN32;
119                         break;
120                 case 16:
121                         value |= XGMAC_BLEN16;
122                         break;
123                 case 8:
124                         value |= XGMAC_BLEN8;
125                         break;
126                 case 4:
127                         value |= XGMAC_BLEN4;
128                         break;
129                 }
130         }
131
132         writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
133         writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
134         writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
135 }
136
137 static void dwxgmac2_dma_dump_regs(struct stmmac_priv *priv,
138                                    void __iomem *ioaddr, u32 *reg_space)
139 {
140         int i;
141
142         for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
143                 reg_space[i] = readl(ioaddr + i * 4);
144 }
145
/* Configure the MTL RX queue operating mode for @channel: store-and-
 * forward vs threshold mode, queue size, and hardware flow-control
 * thresholds.  @fifosz is the per-queue RX FIFO size in bytes.
 */
static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
				 int mode, u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
	/* RQS encodes the queue size in 256-byte units, minus one */
	unsigned int rqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_RSF;	/* RX store-and-forward */
	} else {
		value &= ~XGMAC_RSF;
		value &= ~XGMAC_RTC;

		/* Threshold mode: map the requested byte threshold to the
		 * RTC field encoding (0x0 <= 64B, 0x2 <= 96B, 0x3 above).
		 */
		if (mode <= 64)
			value |= 0x0 << XGMAC_RTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_RTC_SHIFT;
		else
			value |= 0x3 << XGMAC_RTC_SHIFT;
	}

	value &= ~XGMAC_RQS;
	value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;

	/* Hardware flow control is only enabled when the FIFO is large
	 * enough and the queue is not in AVB mode.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
		unsigned int rfd, rfa;

		value |= XGMAC_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		default:
			rfd = 0x07; /* Full-4.5K */
			rfa = 0x04; /* Full-3K */
			break;
		}

		flow &= ~XGMAC_RFD;
		flow |= rfd << XGMAC_RFD_SHIFT;

		flow &= ~XGMAC_RFA;
		flow |= rfa << XGMAC_RFA_SHIFT;

		writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
	}

	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));

	/* Enable MTL RX overflow */
	value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
	writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
211
212 static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
213                                  int mode, u32 channel, int fifosz, u8 qmode)
214 {
215         u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
216         unsigned int tqs = fifosz / 256 - 1;
217
218         if (mode == SF_DMA_MODE) {
219                 value |= XGMAC_TSF;
220         } else {
221                 value &= ~XGMAC_TSF;
222                 value &= ~XGMAC_TTC;
223
224                 if (mode <= 64)
225                         value |= 0x0 << XGMAC_TTC_SHIFT;
226                 else if (mode <= 96)
227                         value |= 0x2 << XGMAC_TTC_SHIFT;
228                 else if (mode <= 128)
229                         value |= 0x3 << XGMAC_TTC_SHIFT;
230                 else if (mode <= 192)
231                         value |= 0x4 << XGMAC_TTC_SHIFT;
232                 else if (mode <= 256)
233                         value |= 0x5 << XGMAC_TTC_SHIFT;
234                 else if (mode <= 384)
235                         value |= 0x6 << XGMAC_TTC_SHIFT;
236                 else
237                         value |= 0x7 << XGMAC_TTC_SHIFT;
238         }
239
240         /* Use static TC to Queue mapping */
241         value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;
242
243         value &= ~XGMAC_TXQEN;
244         if (qmode != MTL_QUEUE_AVB)
245                 value |= 0x2 << XGMAC_TXQEN_SHIFT;
246         else
247                 value |= 0x1 << XGMAC_TXQEN_SHIFT;
248
249         value &= ~XGMAC_TQS;
250         value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
251
252         writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
253 }
254
255 static void dwxgmac2_enable_dma_irq(struct stmmac_priv *priv,
256                                     void __iomem *ioaddr, u32 chan,
257                                     bool rx, bool tx)
258 {
259         u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
260
261         if (rx)
262                 value |= XGMAC_DMA_INT_DEFAULT_RX;
263         if (tx)
264                 value |= XGMAC_DMA_INT_DEFAULT_TX;
265
266         writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
267 }
268
269 static void dwxgmac2_disable_dma_irq(struct stmmac_priv *priv,
270                                      void __iomem *ioaddr, u32 chan,
271                                      bool rx, bool tx)
272 {
273         u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
274
275         if (rx)
276                 value &= ~XGMAC_DMA_INT_DEFAULT_RX;
277         if (tx)
278                 value &= ~XGMAC_DMA_INT_DEFAULT_TX;
279
280         writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
281 }
282
283 static void dwxgmac2_dma_start_tx(struct stmmac_priv *priv,
284                                   void __iomem *ioaddr, u32 chan)
285 {
286         u32 value;
287
288         value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
289         value |= XGMAC_TXST;
290         writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
291
292         value = readl(ioaddr + XGMAC_TX_CONFIG);
293         value |= XGMAC_CONFIG_TE;
294         writel(value, ioaddr + XGMAC_TX_CONFIG);
295 }
296
297 static void dwxgmac2_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
298                                  u32 chan)
299 {
300         u32 value;
301
302         value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
303         value &= ~XGMAC_TXST;
304         writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
305
306         value = readl(ioaddr + XGMAC_TX_CONFIG);
307         value &= ~XGMAC_CONFIG_TE;
308         writel(value, ioaddr + XGMAC_TX_CONFIG);
309 }
310
311 static void dwxgmac2_dma_start_rx(struct stmmac_priv *priv,
312                                   void __iomem *ioaddr, u32 chan)
313 {
314         u32 value;
315
316         value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
317         value |= XGMAC_RXST;
318         writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
319
320         value = readl(ioaddr + XGMAC_RX_CONFIG);
321         value |= XGMAC_CONFIG_RE;
322         writel(value, ioaddr + XGMAC_RX_CONFIG);
323 }
324
325 static void dwxgmac2_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
326                                  u32 chan)
327 {
328         u32 value;
329
330         value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
331         value &= ~XGMAC_RXST;
332         writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
333 }
334
/* Service the DMA interrupt for channel @chan.  @dir restricts handling
 * to one direction's status bits.  Updates driver stats and returns a
 * bitmask (handle_rx/handle_tx/tx_hard_error) telling the caller what
 * work is pending.
 */
static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
				  void __iomem *ioaddr,
				  struct stmmac_extra_stats *x, u32 chan,
				  u32 dir)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
	int ret = 0;

	/* When servicing a single direction, ignore the other one's bits */
	if (dir == DMA_DIR_RX)
		intr_status &= XGMAC_DMA_STATUS_MSK_RX;
	else if (dir == DMA_DIR_TX)
		intr_status &= XGMAC_DMA_STATUS_MSK_TX;

	/* ABNORMAL interrupts */
	if (unlikely(intr_status & XGMAC_AIS)) {
		if (unlikely(intr_status & XGMAC_RBU)) {
			/* RX buffer unavailable: refill by running NAPI */
			x->rx_buf_unav_irq++;
			ret |= handle_rx;
		}
		if (unlikely(intr_status & XGMAC_TPS)) {
			x->tx_process_stopped_irq++;
			ret |= tx_hard_error;
		}
		if (unlikely(intr_status & XGMAC_FBE)) {
			x->fatal_bus_error_irq++;
			ret |= tx_hard_error;
		}
	}

	/* TX/RX NORMAL interrupts */
	if (likely(intr_status & XGMAC_NIS)) {
		if (likely(intr_status & XGMAC_RI)) {
			/* Per-queue counters are updated under the u64_stats
			 * seqcount so 64-bit reads stay consistent on 32-bit.
			 */
			u64_stats_update_begin(&rx_q->rxq_stats.syncp);
			rx_q->rxq_stats.rx_normal_irq_n++;
			u64_stats_update_end(&rx_q->rxq_stats.syncp);
			ret |= handle_rx;
		}
		if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
			u64_stats_update_begin(&tx_q->txq_stats.syncp);
			tx_q->txq_stats.tx_normal_irq_n++;
			u64_stats_update_end(&tx_q->txq_stats.syncp);
			ret |= handle_tx;
		}
	}

	/* Clear interrupts: acknowledge only the enabled bits that fired */
	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));

	return ret;
}
388
/* Decode the XGMAC hardware feature registers (HW_FEATURE0..4) into the
 * generic stmmac @dma_cap capability structure.  Each field is extracted
 * by masking its bit range and shifting down to bit 0.  Always returns 0.
 */
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
				   struct dma_features *dma_cap)
{
	u32 hw_cap;

	/* MAC HW feature 0 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
	dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
	dma_cap->ediffc = (hw_cap & XGMAC_HWFEAT_EDIFFC) >> 30;
	dma_cap->vxn = (hw_cap & XGMAC_HWFEAT_VXN) >> 29;
	dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
	dma_cap->tssrc = (hw_cap & XGMAC_HWFEAT_TSSTSSEL) >> 25;
	dma_cap->multi_addr = (hw_cap & XGMAC_HWFEAT_ADDMACADRSEL) >> 18;
	dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
	dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
	dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
	dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
	/* AV is only reported when the RAV (RX-side AV) bit is clear */
	dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
	dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
	dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
	dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
	dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
	dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
	dma_cap->sma_mdio = (hw_cap & XGMAC_HWFEAT_SMASEL) >> 5;
	dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
	dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;

	/* MAC HW feature 1 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
	/* If L3L4FNUM < 8, then the number of L3L4 filters supported by
	 * XGMAC is equal to L3L4FNUM. From L3L4FNUM >= 8 the number of
	 * L3L4 filters goes on like 8, 16, 32, ... Current maximum of
	 * L3L4FNUM = 10.
	 */
	if (dma_cap->l3l4fnum >= 8 && dma_cap->l3l4fnum <= 10)
		dma_cap->l3l4fnum = 8 << (dma_cap->l3l4fnum - 8);
	else if (dma_cap->l3l4fnum > 10)
		dma_cap->l3l4fnum = 32;

	dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
	dma_cap->numtc = ((hw_cap & XGMAC_HWFEAT_NUMTC) >> 21) + 1;
	dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
	dma_cap->dbgmem = (hw_cap & XGMAC_HWFEAT_DBGMEMA) >> 19;
	dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
	dma_cap->dcben = (hw_cap & XGMAC_HWFEAT_DCBEN) >> 16;

	/* Translate the 2-bit ADDR64 encoding to a bus width in bits;
	 * unknown encodings fall back to 32-bit addressing.
	 */
	dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	dma_cap->advthword = (hw_cap & XGMAC_HWFEAT_ADVTHWORD) >> 13;
	dma_cap->ptoen = (hw_cap & XGMAC_HWFEAT_PTOEN) >> 12;
	dma_cap->osten = (hw_cap & XGMAC_HWFEAT_OSTEN) >> 11;
	/* FIFO sizes are encoded as log2 of the size in 128-byte units */
	dma_cap->tx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
	dma_cap->pfcen = (hw_cap & XGMAC_HWFEAT_PFCEN) >> 5;
	dma_cap->rx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

	/* MAC HW feature 2: channel/queue counts are stored minus one */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
	dma_cap->aux_snapshot_n = (hw_cap & XGMAC_HWFEAT_AUXSNAPNUM) >> 28;
	dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
	dma_cap->number_tx_channel =
		((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
	dma_cap->number_rx_channel =
		((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
	dma_cap->number_rx_queues =
		((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;

	/* MAC HW feature 3 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
	dma_cap->tbs_ch_num = ((hw_cap & XGMAC_HWFEAT_TBSCH) >> 28) + 1;
	dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
	dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
	dma_cap->sgfsel = (hw_cap & XGMAC_HWFEAT_SGFSEL) >> 25;
	dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
	dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
	dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
	dma_cap->ttsfd = (hw_cap & XGMAC_HWFEAT_TTSFD) >> 16;
	dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
	dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
	dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
	dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
	dma_cap->pou_ost_en = (hw_cap & XGMAC_HWFEAT_POUOST) >> 8;
	dma_cap->frppipe_num = ((hw_cap & XGMAC_HWFEAT_FRPPIPE) >> 5) + 1;
	dma_cap->cbtisel = (hw_cap & XGMAC_HWFEAT_CBTISEL) >> 4;
	dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
	dma_cap->nrvf_num = (hw_cap & XGMAC_HWFEAT_NRVF) >> 0;

	/* MAC HW feature 4 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE4);
	/* Extended ASP is merged into the ASP capability from feature 3 */
	dma_cap->asp |= (hw_cap & XGMAC_HWFEAT_EASP) >> 2;
	dma_cap->pcsel = (hw_cap & XGMAC_HWFEAT_PCSEL) >> 0;

	return 0;
}
503
504 static void dwxgmac2_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
505                                  u32 riwt, u32 queue)
506 {
507         writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue));
508 }
509
510 static void dwxgmac2_set_rx_ring_len(struct stmmac_priv *priv,
511                                      void __iomem *ioaddr, u32 len, u32 chan)
512 {
513         writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
514 }
515
516 static void dwxgmac2_set_tx_ring_len(struct stmmac_priv *priv,
517                                      void __iomem *ioaddr, u32 len, u32 chan)
518 {
519         writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
520 }
521
522 static void dwxgmac2_set_rx_tail_ptr(struct stmmac_priv *priv,
523                                      void __iomem *ioaddr, u32 ptr, u32 chan)
524 {
525         writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
526 }
527
528 static void dwxgmac2_set_tx_tail_ptr(struct stmmac_priv *priv,
529                                      void __iomem *ioaddr, u32 ptr, u32 chan)
530 {
531         writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
532 }
533
534 static void dwxgmac2_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
535                                 bool en, u32 chan)
536 {
537         u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
538
539         if (en)
540                 value |= XGMAC_TSE;
541         else
542                 value &= ~XGMAC_TSE;
543
544         writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
545 }
546
547 static void dwxgmac2_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
548                            u32 channel, u8 qmode)
549 {
550         u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
551         u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
552
553         value &= ~XGMAC_TXQEN;
554         if (qmode != MTL_QUEUE_AVB) {
555                 value |= 0x2 << XGMAC_TXQEN_SHIFT;
556                 writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
557         } else {
558                 value |= 0x1 << XGMAC_TXQEN_SHIFT;
559                 writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
560         }
561
562         writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
563 }
564
565 static void dwxgmac2_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
566                                 int bfsize, u32 chan)
567 {
568         u32 value;
569
570         value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
571         value &= ~XGMAC_RBSZ;
572         value |= bfsize << XGMAC_RBSZ_SHIFT;
573         writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
574 }
575
576 static void dwxgmac2_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
577                                 bool en, u32 chan)
578 {
579         u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
580
581         value &= ~XGMAC_CONFIG_HDSMS;
582         value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
583         writel(value, ioaddr + XGMAC_RX_CONFIG);
584
585         value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
586         if (en)
587                 value |= XGMAC_SPH;
588         else
589                 value &= ~XGMAC_SPH;
590         writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
591 }
592
/* Enable or disable Time-Based Scheduling (enhanced descriptors) on TX
 * DMA channel @chan.  Returns 0 on success, -EIO if the hardware refused
 * to latch the enable bit.
 */
static int dwxgmac2_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
			       bool en, u32 chan)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	if (en)
		value |= XGMAC_EDSE;
	else
		value &= ~XGMAC_EDSE;

	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	/* Read back to check the EDSE bit actually stuck; if it did not
	 * while enabling, TBS is unsupported on this channel.
	 */
	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
	if (en && !value)
		return -EIO;

	/* Program the default FTOS value into all four TBS control regs */
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
	return 0;
}
615
/* DMA operations table for the Synopsys XGMAC 2.10 core, consumed by
 * the generic stmmac layer through the stmmac_dma_ops interface.
 */
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
	.reset = dwxgmac2_dma_reset,
	.init = dwxgmac2_dma_init,
	.init_chan = dwxgmac2_dma_init_chan,
	.init_rx_chan = dwxgmac2_dma_init_rx_chan,
	.init_tx_chan = dwxgmac2_dma_init_tx_chan,
	.axi = dwxgmac2_dma_axi,
	.dump_regs = dwxgmac2_dma_dump_regs,
	.dma_rx_mode = dwxgmac2_dma_rx_mode,
	.dma_tx_mode = dwxgmac2_dma_tx_mode,
	.enable_dma_irq = dwxgmac2_enable_dma_irq,
	.disable_dma_irq = dwxgmac2_disable_dma_irq,
	.start_tx = dwxgmac2_dma_start_tx,
	.stop_tx = dwxgmac2_dma_stop_tx,
	.start_rx = dwxgmac2_dma_start_rx,
	.stop_rx = dwxgmac2_dma_stop_rx,
	.dma_interrupt = dwxgmac2_dma_interrupt,
	.get_hw_feature = dwxgmac2_get_hw_feature,
	.rx_watchdog = dwxgmac2_rx_watchdog,
	.set_rx_ring_len = dwxgmac2_set_rx_ring_len,
	.set_tx_ring_len = dwxgmac2_set_tx_ring_len,
	.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
	.enable_tso = dwxgmac2_enable_tso,
	.qmode = dwxgmac2_qmode,
	.set_bfsize = dwxgmac2_set_bfsize,
	.enable_sph = dwxgmac2_enable_sph,
	.enable_tbs = dwxgmac2_enable_tbs,
};