// SPDX-License-Identifier: BSD-3-Clause
/* Copyright (c) 2016-2018, NXP Semiconductors
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/packing.h>
#include "sja1105.h"

#define SJA1105_SIZE_CGU_CMD	4
#define SJA1110_BASE_MCSS_CLK	SJA1110_CGU_ADDR(0x70)
#define SJA1110_BASE_TIMER_CLK	SJA1110_CGU_ADDR(0x74)

/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
struct sja1105_cfg_pad_mii {
        u64 d32_os;
        u64 d32_ih;
        u64 d32_ipud;
        u64 d10_os;
        u64 d10_ih;
        u64 d10_ipud;
        u64 ctrl_os;
        u64 ctrl_ih;
        u64 ctrl_ipud;
        u64 clk_os;
        u64 clk_ih;
        u64 clk_ipud;
};

struct sja1105_cfg_pad_mii_id {
        u64 rxc_stable_ovr;
        u64 rxc_delay;
        u64 rxc_bypass;
        u64 rxc_pd;
        u64 txc_stable_ovr;
        u64 txc_delay;
        u64 txc_bypass;
        u64 txc_pd;
};

/* IDIV_0_C to IDIV_4_C control registers
 * (addr. 10000Bh to 10000Fh)
 */
struct sja1105_cgu_idiv {
        u64 clksrc;
        u64 autoblock;
        u64 idiv;
        u64 pd;
};

/* PLL_1_C control register
 *
 * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
 * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
 */
struct sja1105_cgu_pll_ctrl {
        u64 pllclksrc;
        u64 msel;
        u64 autoblock;
        u64 psel;
        u64 direct;
        u64 fbsel;
        u64 bypass;
        u64 pd;
};

struct sja1110_cgu_outclk {
        u64 clksrc;
        u64 autoblock;
        u64 pd;
};

enum {
        CLKSRC_MII0_TX_CLK = 0x00,
        CLKSRC_MII0_RX_CLK = 0x01,
        CLKSRC_MII1_TX_CLK = 0x02,
        CLKSRC_MII1_RX_CLK = 0x03,
        CLKSRC_MII2_TX_CLK = 0x04,
        CLKSRC_MII2_RX_CLK = 0x05,
        CLKSRC_MII3_TX_CLK = 0x06,
        CLKSRC_MII3_RX_CLK = 0x07,
        CLKSRC_MII4_TX_CLK = 0x08,
        CLKSRC_MII4_RX_CLK = 0x09,
        CLKSRC_PLL0        = 0x0B,
        CLKSRC_PLL1        = 0x0E,
        CLKSRC_IDIV0       = 0x11,
        CLKSRC_IDIV1       = 0x12,
        CLKSRC_IDIV2       = 0x13,
        CLKSRC_IDIV3       = 0x14,
        CLKSRC_IDIV4       = 0x15,
};
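
/* Clock source legend, as used by the helpers below: CLKSRC_MIIx_{TX,RX}_CLK
 * select the clocks recovered from the per-port xMII pads, CLKSRC_IDIVx
 * select the per-port integer dividers fed by the 25 MHz base clock, and
 * CLKSRC_PLL0/CLKSRC_PLL1 select the 125 MHz (RGMII) and 50 MHz (RMII) PLL
 * outputs. These IDs are written into the CLKSRC field of the CGU clock
 * control registers.
 */
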
/* MIIx clock control registers 1 to 30
 * (addresses 100013h to 100035h)
 */
struct sja1105_cgu_mii_ctrl {
        u64 clksrc;
        u64 autoblock;
        u64 pd;
};

static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
                                     enum packing_op op)
{
        const int size = SJA1105_SIZE_CGU_CMD;

        sja1105_packing(buf, &idiv->clksrc,    28, 24, size, op);
        sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
        sja1105_packing(buf, &idiv->idiv,       5,  2, size, op);
        sja1105_packing(buf, &idiv->pd,         0,  0, size, op);
}
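
/* Illustration of the layout above: an IDIV payload with clksrc = 0x0A,
 * autoblock = 1, idiv = 0 (divide by 1) and pd = 0 packs into the 32-bit
 * register word 0x0A000800 (0x0A << 24 | 1 << 11).
 */
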
static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
                                   bool enabled, int factor)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct device *dev = priv->ds->dev;
        struct sja1105_cgu_idiv idiv;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

        if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR)
                return 0;

        if (enabled && factor != 1 && factor != 10) {
                dev_err(dev, "idiv factor must be 1 or 10\n");
                return -ERANGE;
        }

        /* Payload for packed_buf */
        idiv.clksrc = 0x0A;             /* 25MHz */
        idiv.autoblock = 1;             /* Block clk automatically */
        idiv.idiv = factor - 1;         /* Divide by 1 or 10 */
        idiv.pd = enabled ? 0 : 1;      /* Power down? */
        sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}
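
/* The helper above derives the per-port clock from the 25 MHz base clock:
 * for example, sja1105_cgu_idiv_config(priv, port, true, 10) yields the
 * 2.5 MHz clock needed for 10 Mbps operation, while passing enabled = false
 * powers the divider down entirely.
 */
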
static void
sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
                                enum packing_op op)
{
        const int size = SJA1105_SIZE_CGU_CMD;

        sja1105_packing(buf, &cmd->clksrc,    28, 24, size, op);
        sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
        sja1105_packing(buf, &cmd->pd,         0,  0, size, op);
}

static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
                                         int port, sja1105_mii_role_t role)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl mii_tx_clk;
        const int mac_clk_sources[] = {
                CLKSRC_MII0_TX_CLK,
                CLKSRC_MII1_TX_CLK,
                CLKSRC_MII2_TX_CLK,
                CLKSRC_MII3_TX_CLK,
                CLKSRC_MII4_TX_CLK,
        };
        const int phy_clk_sources[] = {
                CLKSRC_IDIV0,
                CLKSRC_IDIV1,
                CLKSRC_IDIV2,
                CLKSRC_IDIV3,
                CLKSRC_IDIV4,
        };
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        int clksrc;

        if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR)
                return 0;

        if (role == XMII_MAC)
                clksrc = mac_clk_sources[port];
        else
                clksrc = phy_clk_sources[port];

        /* Payload for packed_buf */
        mii_tx_clk.clksrc = clksrc;
        mii_tx_clk.autoblock = 1;       /* Autoblock clk while changing clksrc */
        mii_tx_clk.pd = 0;              /* Power Down off => enabled */
        sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl mii_rx_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        const int clk_sources[] = {
                CLKSRC_MII0_RX_CLK,
                CLKSRC_MII1_RX_CLK,
                CLKSRC_MII2_RX_CLK,
                CLKSRC_MII3_RX_CLK,
                CLKSRC_MII4_RX_CLK,
        };

        if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR)
                return 0;

        /* Payload for packed_buf */
        mii_rx_clk.clksrc = clk_sources[port];
        mii_rx_clk.autoblock = 1;       /* Autoblock clk while changing clksrc */
        mii_rx_clk.pd = 0;              /* Power Down off => enabled */
        sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        const int clk_sources[] = {
                CLKSRC_IDIV0,
                CLKSRC_IDIV1,
                CLKSRC_IDIV2,
                CLKSRC_IDIV3,
                CLKSRC_IDIV4,
        };

        if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
                return 0;

        /* Payload for packed_buf */
        mii_ext_tx_clk.clksrc = clk_sources[port];
        mii_ext_tx_clk.autoblock = 1;   /* Autoblock clk while changing clksrc */
        mii_ext_tx_clk.pd = 0;          /* Power Down off => enabled */
        sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        const int clk_sources[] = {
                CLKSRC_IDIV0,
                CLKSRC_IDIV1,
                CLKSRC_IDIV2,
                CLKSRC_IDIV3,
                CLKSRC_IDIV4,
        };

        if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR)
                return 0;

        /* Payload for packed_buf */
        mii_ext_rx_clk.clksrc = clk_sources[port];
        mii_ext_rx_clk.autoblock = 1;   /* Autoblock clk while changing clksrc */
        mii_ext_rx_clk.pd = 0;          /* Power Down off => enabled */
        sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
                                      sja1105_mii_role_t role)
{
        struct device *dev = priv->ds->dev;
        int rc;

        dev_dbg(dev, "Configuring MII-%s clocking\n",
                (role == XMII_MAC) ? "MAC" : "PHY");
        /* If role is MAC, disable IDIV
         * If role is PHY, enable IDIV and configure for 1/1 divider
         */
        rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
        if (rc < 0)
                return rc;

        /* Configure CLKSRC of MII_TX_CLK_n
         *   * If role is MAC, select TX_CLK_n
         *   * If role is PHY, select IDIV_n
         */
        rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
        if (rc < 0)
                return rc;

        /* Configure CLKSRC of MII_RX_CLK_n
         * Select RX_CLK_n
         */
        rc = sja1105_cgu_mii_rx_clk_config(priv, port);
        if (rc < 0)
                return rc;

        if (role == XMII_PHY) {
                /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */

                /* Configure CLKSRC of EXT_TX_CLK_n
                 * Select IDIV_n
                 */
                rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
                if (rc < 0)
                        return rc;

                /* Configure CLKSRC of EXT_RX_CLK_n
                 * Select IDIV_n
                 */
                rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
                if (rc < 0)
                        return rc;
        }
        return 0;
}
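
/* To summarize the routine above: in MII MAC mode the port's TX/RX clocks
 * are taken straight from the TX_CLK_n/RX_CLK_n pins driven by the external
 * PHY, whereas in MII PHY mode the switch itself generates the clock from
 * IDIV_n (25 MHz) and additionally drives the EXT_TX_CLK/EXT_RX_CLK outputs.
 */
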
static void
sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
                                enum packing_op op)
{
        const int size = SJA1105_SIZE_CGU_CMD;

        sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
        sja1105_packing(buf, &cmd->msel,      23, 16, size, op);
        sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
        sja1105_packing(buf, &cmd->psel,       9,  8, size, op);
        sja1105_packing(buf, &cmd->direct,     7,  7, size, op);
        sja1105_packing(buf, &cmd->fbsel,      6,  6, size, op);
        sja1105_packing(buf, &cmd->bypass,     1,  1, size, op);
        sja1105_packing(buf, &cmd->pd,         0,  0, size, op);
}

static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
                                           int port, u64 speed)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl txc;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        int clksrc;

        if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR)
                return 0;

        if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
                clksrc = CLKSRC_PLL0;
        } else {
                int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
                                     CLKSRC_IDIV3, CLKSRC_IDIV4};
                clksrc = clk_sources[port];
        }

        /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
        txc.clksrc = clksrc;
        /* Autoblock clk while changing clksrc */
        txc.autoblock = 1;
        /* Power Down off => enabled */
        txc.pd = 0;
        sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}
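
/* Note on the helper above: at 1000 Mbps the RGMII TX clock is the 125 MHz
 * PLL0 output, while at 100/10 Mbps it follows the port's IDIV output
 * (25 MHz or 2.5 MHz), which is why sja1105_rgmii_clocking_setup() programs
 * IDIV differently per link speed.
 */
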
static void
sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
                            enum packing_op op)
{
        const int size = SJA1105_SIZE_CGU_CMD;

        sja1105_packing(buf, &cmd->d32_os,    28, 27, size, op);
        sja1105_packing(buf, &cmd->d32_ih,    26, 26, size, op);
        sja1105_packing(buf, &cmd->d32_ipud,  25, 24, size, op);
        sja1105_packing(buf, &cmd->d10_os,    20, 19, size, op);
        sja1105_packing(buf, &cmd->d10_ih,    18, 18, size, op);
        sja1105_packing(buf, &cmd->d10_ipud,  17, 16, size, op);
        sja1105_packing(buf, &cmd->ctrl_os,   12, 11, size, op);
        sja1105_packing(buf, &cmd->ctrl_ih,   10, 10, size, op);
        sja1105_packing(buf, &cmd->ctrl_ipud,  9,  8, size, op);
        sja1105_packing(buf, &cmd->clk_os,     4,  3, size, op);
        sja1105_packing(buf, &cmd->clk_ih,     2,  2, size, op);
        sja1105_packing(buf, &cmd->clk_ipud,   1,  0, size, op);
}
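
/* Field naming in the pad configuration word above: the d32/d10/ctrl/clk
 * prefixes select the TXD[3:2]/RXD[3:2], TXD[1:0]/RXD[1:0], control
 * (TX_CTL, RX_CTL, etc.) and clock pad groups, while the _os, _ih and _ipud
 * suffixes configure output stage strength, input hysteresis and input weak
 * pull-up/down, as spelled out in the per-field comments below.
 */
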
static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
                                           int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cfg_pad_mii pad_mii_tx = {0};
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

        if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR)
                return 0;

        /* Payload */
        pad_mii_tx.d32_os = 3;          /* TXD[3:2] output stage: */
                                        /* high noise/high speed */
        pad_mii_tx.d10_os = 3;          /* TXD[1:0] output stage: */
                                        /* high noise/high speed */
        pad_mii_tx.d32_ipud = 2;        /* TXD[3:2] input stage: */
                                        /* plain input (default) */
        pad_mii_tx.d10_ipud = 2;        /* TXD[1:0] input stage: */
                                        /* plain input (default) */
        pad_mii_tx.ctrl_os = 3;         /* TX_CTL / TX_ER output stage */
        pad_mii_tx.ctrl_ipud = 2;       /* TX_CTL / TX_ER input stage (default) */
        pad_mii_tx.clk_os = 3;          /* TX_CLK output stage */
        pad_mii_tx.clk_ih = 0;          /* TX_CLK input hysteresis (default) */
        pad_mii_tx.clk_ipud = 2;        /* TX_CLK input stage (default) */
        sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cfg_pad_mii pad_mii_rx = {0};
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

        if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR)
                return 0;

        /* Payload */
        pad_mii_rx.d32_ih = 0;          /* RXD[3:2] input stage hysteresis: */
                                        /* non-Schmitt (default) */
        pad_mii_rx.d32_ipud = 2;        /* RXD[3:2] input weak pull-up/down */
                                        /* plain input (default) */
        pad_mii_rx.d10_ih = 0;          /* RXD[1:0] input stage hysteresis: */
                                        /* non-Schmitt (default) */
        pad_mii_rx.d10_ipud = 2;        /* RXD[1:0] input weak pull-up/down */
                                        /* plain input (default) */
        pad_mii_rx.ctrl_ih = 0;         /* RX_DV/CRS_DV/RX_CTL and RX_ER */
                                        /* input stage hysteresis: */
                                        /* non-Schmitt (default) */
        pad_mii_rx.ctrl_ipud = 3;       /* RX_DV/CRS_DV/RX_CTL and RX_ER */
                                        /* input stage weak pull-up/down: */
                                        /* pull-down */
        pad_mii_rx.clk_os = 2;          /* RX_CLK/RXC output stage: */
                                        /* medium noise/fast speed (default) */
        pad_mii_rx.clk_ih = 0;          /* RX_CLK/RXC input hysteresis: */
                                        /* non-Schmitt (default) */
        pad_mii_rx.clk_ipud = 2;        /* RX_CLK/RXC input pull-up/down: */
                                        /* plain input (default) */
        sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

static void
sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
                               enum packing_op op)
{
        const int size = SJA1105_SIZE_CGU_CMD;

        sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
        sja1105_packing(buf, &cmd->rxc_delay,      14, 10, size, op);
        sja1105_packing(buf, &cmd->rxc_bypass,      9,  9, size, op);
        sja1105_packing(buf, &cmd->rxc_pd,          8,  8, size, op);
        sja1105_packing(buf, &cmd->txc_stable_ovr,  7,  7, size, op);
        sja1105_packing(buf, &cmd->txc_delay,       6,  2, size, op);
        sja1105_packing(buf, &cmd->txc_bypass,      1,  1, size, op);
        sja1105_packing(buf, &cmd->txc_pd,          0,  0, size, op);
}
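
/* The rxc_delay/txc_delay fields above are 5 bits wide (bits 14:10 and 6:2),
 * so delay_tune can take values 0..31, which with the 73.8 + 0.9 * delay_tune
 * formula used by sja1105_rgmii_delay() spans the 73.8 to 101.7 degree range
 * documented there.
 */
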
static void
sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
                               enum packing_op op)
{
        const int size = SJA1105_SIZE_CGU_CMD;
        u64 range = 4;

        /* Fields RXC_RANGE and TXC_RANGE select the input frequency range:
         * 0 = 2.5MHz
         * 1 = 25MHz
         * 2 = 50MHz
         * 3 = 125MHz
         * 4 = Automatically determined by port speed.
         * There's no point in defining a structure different than the one for
         * SJA1105, so just hardcode the frequency range to automatic, just as
         * before.
         */
        sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op);
        sja1105_packing(buf, &cmd->rxc_delay,      25, 21, size, op);
        sja1105_packing(buf, &range,               20, 18, size, op);
        sja1105_packing(buf, &cmd->rxc_bypass,     17, 17, size, op);
        sja1105_packing(buf, &cmd->rxc_pd,         16, 16, size, op);
        sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op);
        sja1105_packing(buf, &cmd->txc_delay,       9,  5, size, op);
        sja1105_packing(buf, &range,                4,  2, size, op);
        sja1105_packing(buf, &cmd->txc_bypass,      1,  1, size, op);
        sja1105_packing(buf, &cmd->txc_pd,          0,  0, size, op);
}

/* Valid range of the RGMII delay, in degrees, is between 73.8 and 101.7 */
static u64 sja1105_rgmii_delay(u64 phase)
{
        /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
         * To avoid floating point operations we'll multiply by 10
         * and get 1 decimal point precision.
         */
        phase *= 10;
        return (phase - 738) / 9;
}
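
/* Worked example for the helper above: a 90 degree delay request becomes
 * delay_tune = (900 - 738) / 9 = 18, the value written into the 5-bit
 * rxc_delay/txc_delay fields by the callers below.
 */
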
/* The RGMII delay setup procedure is 2-step and gets called upon each
 * .phylink_mac_config. Both steps are deliberate.
 * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
 * with recovering from a frequency change of the link partner's RGMII clock.
 * The easiest way to recover from this is to temporarily power down the TDL,
 * as it will re-lock at the new frequency afterwards.
 */
int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
{
        const struct sja1105_private *priv = ctx;
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        int rc;

        if (priv->rgmii_rx_delay[port])
                pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
        if (priv->rgmii_tx_delay[port])
                pad_mii_id.txc_delay = sja1105_rgmii_delay(90);

        /* Stage 1: Turn the RGMII delay lines off. */
        pad_mii_id.rxc_bypass = 1;
        pad_mii_id.rxc_pd = 1;
        pad_mii_id.txc_bypass = 1;
        pad_mii_id.txc_pd = 1;
        sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

        rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
                              packed_buf, SJA1105_SIZE_CGU_CMD);
        if (rc < 0)
                return rc;

        /* Stage 2: Turn the RGMII delay lines on. */
        if (priv->rgmii_rx_delay[port]) {
                pad_mii_id.rxc_bypass = 0;
                pad_mii_id.rxc_pd = 0;
        }
        if (priv->rgmii_tx_delay[port]) {
                pad_mii_id.txc_bypass = 0;
                pad_mii_id.txc_pd = 0;
        }
        sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

int sja1110_setup_rgmii_delay(const void *ctx, int port)
{
        const struct sja1105_private *priv = ctx;
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

        pad_mii_id.rxc_pd = 1;
        pad_mii_id.txc_pd = 1;

        if (priv->rgmii_rx_delay[port]) {
                pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
                /* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
                pad_mii_id.rxc_bypass = 1;
                pad_mii_id.rxc_pd = 0;
        }

        if (priv->rgmii_tx_delay[port]) {
                pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
                pad_mii_id.txc_bypass = 1;
                pad_mii_id.txc_pd = 0;
        }

        sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
                                        sja1105_mii_role_t role)
{
        struct device *dev = priv->ds->dev;
        struct sja1105_mac_config_entry *mac;
        u64 speed;
        int rc;

        mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
        speed = mac[port].speed;

        dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n",
                port, speed);

        if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
                /* 1000Mbps, IDIV disabled (125 MHz) */
                rc = sja1105_cgu_idiv_config(priv, port, false, 1);
        } else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) {
                /* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
                rc = sja1105_cgu_idiv_config(priv, port, true, 1);
        } else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) {
                /* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
                rc = sja1105_cgu_idiv_config(priv, port, true, 10);
        } else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) {
                /* Skip CGU configuration if there is no speed available
                 * (e.g. link is not established yet)
                 */
                dev_dbg(dev, "Speed not available, skipping CGU config\n");
                return 0;
        } else {
                rc = -EINVAL;
        }

        if (rc < 0) {
                dev_err(dev, "Failed to configure idiv\n");
                return rc;
        }
        rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
        if (rc < 0) {
                dev_err(dev, "Failed to configure RGMII Tx clock\n");
                return rc;
        }
        rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
        if (rc < 0) {
                dev_err(dev, "Failed to configure Tx pad registers\n");
                return rc;
        }

        if (!priv->info->setup_rgmii_delay)
                return 0;

        return priv->info->setup_rgmii_delay(priv, port);
}
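
/* Summary of the RGMII clocking done above: the port's TX clock is switched
 * between PLL0 (125 MHz) and the IDIV output (25 MHz or 2.5 MHz) according
 * to link speed, the TX pads are set to their high-speed output stage, and
 * the optional RXC/TXC internal delays are then applied by the
 * family-specific setup_rgmii_delay() hook.
 */
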
static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
                                           int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl ref_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        const int clk_sources[] = {
                CLKSRC_MII0_TX_CLK,
                CLKSRC_MII1_TX_CLK,
                CLKSRC_MII2_TX_CLK,
                CLKSRC_MII3_TX_CLK,
                CLKSRC_MII4_TX_CLK,
        };

        if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR)
                return 0;

        /* Payload for packed_buf */
        ref_clk.clksrc = clk_sources[port];
        ref_clk.autoblock = 1;  /* Autoblock clk while changing clksrc */
        ref_clk.pd = 0;         /* Power Down off => enabled */
        sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl ext_tx_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

        if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
                return 0;

        /* Payload for packed_buf */
        ext_tx_clk.clksrc = CLKSRC_PLL1;
        ext_tx_clk.autoblock = 1;       /* Autoblock clk while changing clksrc */
        ext_tx_clk.pd = 0;              /* Power Down off => enabled */
        sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}
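
/* In the helper above, sourcing EXT_TX_CLK from PLL1 means the pin carries
 * the 50 MHz RMII reference clock, so the switch can provide it externally
 * (e.g. to an attached PHY); sja1105_rmii_clocking_setup() only does this
 * for the XMII_MAC role, after enabling PLL1.
 */
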
static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
{
        const struct sja1105_regs *regs = priv->info->regs;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        struct sja1105_cgu_pll_ctrl pll = {0};
        struct device *dev = priv->ds->dev;
        int rc;

        if (regs->rmii_pll1 == SJA1105_RSV_ADDR)
                return 0;

        /* PLL1 must be enabled and output 50 Mhz.
         * This is done by writing first 0x0A010941 to
         * the PLL_1_C register and then deasserting
         * power down (PD) by writing 0x0A010940.
         */

        /* Step 1: PLL1 setup for 50Mhz */
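        /* For reference, 0x0A010941 decodes through the PLL_1_C field layout
         * as pllclksrc = 0xA, msel = 0x1, autoblock = 1, psel = 0x1,
         * direct = 0, fbsel = 1, bypass = 0, pd = 1; the assignments below
         * mirror that value.
         */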
        pll.pllclksrc = 0xA;
        pll.msel = 0x1;
        pll.autoblock = 0x1;
        pll.psel = 0x1;
        pll.direct = 0x0;
        pll.fbsel = 0x1;
        pll.bypass = 0x0;
        pll.pd = 0x1;

        sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
        rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
                              SJA1105_SIZE_CGU_CMD);
        if (rc < 0) {
                dev_err(dev, "failed to configure PLL1 for 50MHz\n");
                return rc;
        }

        /* Step 2: Enable PLL1 */
        pll.pd = 0x0;

        sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
        rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
                              SJA1105_SIZE_CGU_CMD);
        if (rc < 0) {
                dev_err(dev, "failed to enable PLL1\n");
                return rc;
        }

        return 0;
}

static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
                                       sja1105_mii_role_t role)
{
        struct device *dev = priv->ds->dev;
        int rc;

        dev_dbg(dev, "Configuring RMII-%s clocking\n",
                (role == XMII_MAC) ? "MAC" : "PHY");
        /* AH1601.pdf chapter 2.5.1. Sources */
        if (role == XMII_MAC) {
                /* Configure and enable PLL1 for 50Mhz output */
                rc = sja1105_cgu_rmii_pll_config(priv);
                if (rc < 0)
                        return rc;
        }
        /* Disable IDIV for this port */
        rc = sja1105_cgu_idiv_config(priv, port, false, 1);
        if (rc < 0)
                return rc;
        /* Source to sink mappings */
        rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
        if (rc < 0)
                return rc;
        if (role == XMII_MAC) {
                rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
{
        struct sja1105_xmii_params_entry *mii;
        struct device *dev = priv->ds->dev;
        sja1105_phy_interface_t phy_mode;
        sja1105_mii_role_t role;
        int rc;

        mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

        phy_mode = mii->xmii_mode[port];
        /* MAC or PHY, for applicable types (not RGMII) */
        role = mii->phy_mac[port];

        switch (phy_mode) {
        case XMII_MODE_MII:
                rc = sja1105_mii_clocking_setup(priv, port, role);
                break;
        case XMII_MODE_RMII:
                rc = sja1105_rmii_clocking_setup(priv, port, role);
                break;
        case XMII_MODE_RGMII:
                rc = sja1105_rgmii_clocking_setup(priv, port, role);
                break;
        case XMII_MODE_SGMII:
                /* Nothing to do in the CGU for SGMII */
                rc = 0;
                break;
        default:
                dev_err(dev, "Invalid interface mode specified: %d\n",
                        phy_mode);
                return -EINVAL;
        }
        if (rc) {
                dev_err(dev, "Clocking setup for port %d failed: %d\n",
                        port, rc);
                return rc;
        }

        /* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
        return sja1105_cfg_pad_rx_config(priv, port);
}

int sja1105_clocking_setup(struct sja1105_private *priv)
{
        struct dsa_switch *ds = priv->ds;
        int port, rc;

        for (port = 0; port < ds->num_ports; port++) {
                rc = sja1105_clocking_setup_port(priv, port);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

static void
sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
                           enum packing_op op)
{
        const int size = SJA1105_SIZE_CGU_CMD;

        sja1105_packing(buf, &outclk->clksrc,    27, 24, size, op);
        sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op);
        sja1105_packing(buf, &outclk->pd,         0,  0, size, op);
}

int sja1110_disable_microcontroller(struct sja1105_private *priv)
{
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        struct sja1110_cgu_outclk outclk_6_c = {
                .clksrc = 0x3,
                .pd = true,
        };
        struct sja1110_cgu_outclk outclk_7_c = {
                .clksrc = 0x5,
                .pd = true,
        };
        int rc;

        /* Power down the BASE_TIMER_CLK to disable the watchdog timer */
        sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);

        rc = sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
                              packed_buf, SJA1105_SIZE_CGU_CMD);
        if (rc)
                return rc;

        /* Power down the BASE_MCSS_CLOCK to gate the microcontroller off */
        sja1110_cgu_outclk_packing(packed_buf, &outclk_6_c, PACK);

        return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_MCSS_CLK,
                                packed_buf, SJA1105_SIZE_CGU_CMD);
}