// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}
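/* rswitch_reg_wait() and rswitch_modify() are the two low-level register
 * helpers used throughout this driver: the former busy-polls a register
 * until the masked value matches the expected pattern (or times out after
 * RSWITCH_TIMEOUT_US), the latter does a read-modify-write of one register.
 */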
/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	return (val & BIT(port)) ? true : false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}
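/* Per-agent clocks are gated through the COMA block: enabling sets RCEC_RCE
 * plus the agent's bit in RCEC, while disabling sets the agent's bit in the
 * separate RCDC register. Both the GWCA and the ETHA ports are addressed by
 * their agent index here.
 */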
static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}
/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For the ETHA ports */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For the GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}
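/* With the configuration above, each ETHA port forwards only towards the
 * GWCA (FWPBFC(i) = BIT(gwca.index)) and FWPBFCSDC selects which RX
 * descriptor chain receives that port's frames, while the GWCA itself is
 * allowed to forward to every ETHA port (the GENMASK write).
 */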
/* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}
static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}
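/* Each queue is tracked with two ring indices: gq->cur is the next entry the
 * driver will use and gq->dirty is the oldest entry not yet reclaimed or
 * refilled. The helpers below implement the wrap-around arithmetic for those
 * indices.
 */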
static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
{
	int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;

	return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}
static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}
static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);

		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, int ring_size)
{
	int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
	if (!gq->skbs)
		return -ENOMEM;

	if (!dir_tx) {
		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}
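/* Descriptor pointers are 40 bits wide: the low 32 bits live in dptrl and
 * the upper 8 bits in dptrh, which is why the probe routine first tries a
 * 40-bit DMA mask.
 */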
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
	}

	return -ENOMEM;
}
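/* The extra (ring_size + 1)-th descriptor is a DT_LINKFIX entry pointing
 * back to the ring base, making the chain circular; the corresponding
 * linkfix table entry tells the GWCA where each queue's chain starts.
 */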
static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       int start_index, int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}
static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
	}

	return -ENOMEM;
}
static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}
static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}
static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	return 0;
}
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}
static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}
static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}
static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}
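/* RX path: rswitch_rx() walks the ring from gq->cur and hands completed
 * buffers to NAPI, then re-allocates skbs and re-arms descriptors starting
 * at gq->dirty. It returns true when the NAPI quota was exhausted, so the
 * caller knows more work may be pending.
 */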
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}
static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}
static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	unsigned long flags;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}
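/* Data interrupts for a queue pair are masked in rswitch_queue_interrupt()
 * before NAPI is scheduled and re-enabled in rswitch_poll() only after
 * napi_complete_done() succeeds, so a new data interrupt is not taken while
 * the poll loop is still draining the queues.
 */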
static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
	}
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}
static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}
static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct timespec64 ts;
	u32 tag, port;
	int num;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
			if (!(ts_info->port == port && ts_info->tag == tag))
				continue;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
			break;
		}

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}
static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}
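/* TX timestamp flow: rswitch_start_xmit() queues a rswitch_gwca_ts_info
 * entry (port + tag) for every skb that requests a hardware timestamp, and
 * rswitch_ts() later matches completed timestamp descriptors against that
 * list and delivers the timestamp through skb_tstamp_tx().
 */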
/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}
static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >> 8) & 0xFF;
	mac[1] = (mrmac0 >> 0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >> 8) & 0xFF;
	mac[5] = (mrmac1 >> 0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}
static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}
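/* The MDIO bus exposed by each ETHA only implements Clause 45 accesses
 * (note MPSM_MFF_C45 and the MDIO_READ_C45/MDIO_WRITE_C45 opcodes above);
 * no Clause 22 read/write callbacks are provided.
 */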
/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	u32 index;
	int err;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port))
				port = NULL;
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}
static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	u32 max_speed;
	int err;

	if (!rdev->np_port)
		return 0;	/* ignored */

	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
	if (err)
		return err;

	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
	if (!err) {
		rdev->etha->speed = max_speed;
		return 0;
	}

	/* if no "max-speed" property, let's use default speed */
	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}
static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;

		if (!rdev->priv->etha_no_runtime_change &&
		    phydev->speed != rdev->etha->speed) {
			rdev->etha->speed = phydev->speed;

			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
			phy_set_speed(rdev->serdes, rdev->etha->speed);
		}
	}
}
static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	if (!rdev->priv->etha_no_runtime_change)
		return;

	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}
static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}
static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}
static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		if (rdev->priv->etha_no_runtime_change)
			rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}
static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_phy_init;
	}

	return 0;

err_phy_init:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);

	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}
static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	unsigned long flags;

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);

	return 0;
}
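/* The hardware timestamp interrupt (GWTSDIE/GWTSDID) is shared by all
 * ports: it is enabled when the first port is opened and disabled again in
 * rswitch_stop() once the last opened port goes down, tracked via the
 * opened_ports bitmap.
 */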
static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	unsigned long flags;

	netif_tx_stop_all_queues(ndev);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);

	return 0;
}
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	int ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	gq->skbs[gq->cur] = skb;
	desc = &gq->tx_ring[gq->cur];
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(skb->len);

	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct rswitch_gwca_ts_info *ts_info;

		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
		if (!ts_info) {
			dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

		ts_info->skb = skb_get(skb);
		ts_info->port = rdev->port;
		ts_info->tag = rdev->ts_tag;
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

		skb_tx_timestamp(skb);
	}

	desc->desc.die_dt = DT_FSINGLE | DIE;
	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, 1);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;
}
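/* The rswitch_modify() on GWTRC at the end of rswitch_start_xmit() appears
 * to act as the per-queue doorbell that tells the GWCA to resume DMA after
 * new descriptors have been made available.
 */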
static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}
static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}
static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);
static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;
}
static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}
static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return err;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}
static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* sentinel */ }
};

static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->lock);

	attr = soc_device_match(rswitch_soc_no_speed_change);
	if (attr)
		priv->etha_no_runtime_change = true;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}
static void rswitch_deinit(struct rswitch_private *priv)
{
	int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		struct rswitch_device *rdev = priv->rdev[i];

		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(rdev);
		unregister_netdev(rdev->ndev);
		rswitch_device_free(priv, i);
	}

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

static int renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");