1 // SPDX-License-Identifier: GPL-2.0-only
3 // Apple SoCs MCA driver
5 // Copyright (C) The Asahi Linux Contributors
7 // The MCA peripheral is made up of a number of identical units called clusters.
8 // Each cluster has its separate clock parent, SYNC signal generator, carries
9 // four SERDES units and has a dedicated I2S port on the SoC's periphery.
11 // The clusters can operate independently, or can be combined together in a
12 // configurable manner. We mostly treat them as self-contained independent
13 // units and don't configure any cross-cluster connections except for the I2S
14 // ports. The I2S ports can be routed to any of the clusters (irrespective
15 // of their native cluster). We map this onto ASoC's (DPCM) notion of backend
16 // and frontend DAIs. The 'cluster guts' are frontends which are dynamically
17 // routed to backend I2S ports.
19 // DAI references in devicetree are resolved to backends. The routing between
20 // frontends and backends is determined by the machine driver in the DAPM paths
23 #include <linux/bitfield.h>
24 #include <linux/clk.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/init.h>
27 #include <linux/kernel.h>
28 #include <linux/module.h>
30 #include <linux/of_clk.h>
31 #include <linux/of_dma.h>
32 #include <linux/platform_device.h>
33 #include <linux/pm_domain.h>
34 #include <linux/regmap.h>
35 #include <linux/reset.h>
36 #include <linux/slab.h>
38 #include <sound/core.h>
39 #include <sound/pcm.h>
40 #include <sound/pcm_params.h>
41 #include <sound/soc.h>
42 #include <sound/dmaengine_pcm.h>
/*
 * When defined, capture is wired through the RXB serdes unit instead of RXA,
 * which leaves RXA free (see CLUSTER_RX_OFF below and mca_request_dma_channel).
 */
#define USE_RXB_FOR_CAPTURE

/* Relative to cluster base */
#define REG_STATUS 0x0
#define STATUS_MCLK_EN BIT(0)
#define REG_MCLK_CONF 0x4
#define MCLK_CONF_DIV GENMASK(11, 8)

/* SYNC signal generator: enable, clock selection and FSYNC duty cycle */
#define REG_SYNCGEN_STATUS 0x100
#define SYNCGEN_STATUS_EN BIT(0)
#define REG_SYNCGEN_MCLK_SEL 0x104
#define SYNCGEN_MCLK_SEL GENMASK(3, 0)
#define REG_SYNCGEN_HI_PERIOD 0x108
#define REG_SYNCGEN_LO_PERIOD 0x10c

/* I2S port controls: which cluster drives this port's clocks and data */
#define REG_PORT_ENABLES 0x600
#define PORT_ENABLES_CLOCKS GENMASK(2, 1)
#define PORT_ENABLES_TX_DATA BIT(3)
#define REG_PORT_CLOCK_SEL 0x604
#define PORT_CLOCK_SEL GENMASK(11, 8)
#define REG_PORT_DATA_SEL 0x608
#define PORT_DATA_SEL_TXA(cl) (1 << ((cl)*2))
#define PORT_DATA_SEL_TXB(cl) (2 << ((cl)*2))

#define REG_INTSTATE 0x700
#define REG_INTMASK 0x704

/* Bases of serdes units (relative to cluster) */
#define CLUSTER_RXA_OFF 0x200
#define CLUSTER_TXA_OFF 0x300
#define CLUSTER_RXB_OFF 0x400
#define CLUSTER_TXB_OFF 0x500

/* Playback always uses the TXA serdes unit */
#define CLUSTER_TX_OFF CLUSTER_TXA_OFF
/* Capture serdes unit selection (see USE_RXB_FOR_CAPTURE above) */
#ifndef USE_RXB_FOR_CAPTURE
#define CLUSTER_RX_OFF CLUSTER_RXA_OFF
#else
#define CLUSTER_RX_OFF CLUSTER_RXB_OFF
#endif
/* Relative to serdes unit base */
#define REG_SERDES_STATUS 0x00
#define SERDES_STATUS_EN BIT(0)
#define SERDES_STATUS_RST BIT(1)
#define REG_TX_SERDES_CONF 0x04
#define REG_RX_SERDES_CONF 0x08
#define SERDES_CONF_NCHANS GENMASK(3, 0)
#define SERDES_CONF_WIDTH_MASK GENMASK(8, 4)
#define SERDES_CONF_WIDTH_16BIT 0x40
#define SERDES_CONF_WIDTH_20BIT 0x80
#define SERDES_CONF_WIDTH_24BIT 0xc0
#define SERDES_CONF_WIDTH_32BIT 0x100
#define SERDES_CONF_BCLK_POL 0x400
#define SERDES_CONF_LSB_FIRST 0x800
/* UNK1..UNK3: function unknown, but required for operation (reverse-engineered) */
#define SERDES_CONF_UNK1 BIT(12)
#define SERDES_CONF_UNK2 BIT(13)
#define SERDES_CONF_UNK3 BIT(14)
#define SERDES_CONF_NO_DATA_FEEDBACK BIT(15)
#define SERDES_CONF_SYNC_SEL GENMASK(18, 16)
#define SERDES_CONF_SOME_RST BIT(19)
/* Note: TX and RX serdes units have different register layouts past 0x04 */
#define REG_TX_SERDES_BITSTART 0x08
#define REG_RX_SERDES_BITSTART 0x0c
#define REG_TX_SERDES_SLOTMASK 0x0c
#define REG_RX_SERDES_SLOTMASK 0x10
#define REG_RX_SERDES_PORT 0x04

/* Relative to switch base */
#define REG_DMA_ADAPTER_A(cl) (0x8000 * (cl))
#define REG_DMA_ADAPTER_B(cl) (0x8000 * (cl) + 0x4000)
#define DMA_ADAPTER_TX_LSB_PAD GENMASK(4, 0)
#define DMA_ADAPTER_TX_NCHANS GENMASK(6, 5)
#define DMA_ADAPTER_RX_MSB_PAD GENMASK(12, 8)
#define DMA_ADAPTER_RX_NCHANS GENMASK(14, 13)
#define DMA_ADAPTER_NCHANS GENMASK(22, 20)

/* MMIO strides of the per-cluster switch and cluster register blocks */
#define SWITCH_STRIDE 0x8000
#define CLUSTER_STRIDE 0x4000

#define MAX_NCLUSTERS 6

/* Sample formats supported by the serdes units */
#define APPLE_MCA_FMTBITS (SNDRV_PCM_FMTBIT_S16_LE | \
			   SNDRV_PCM_FMTBIT_S24_LE | \
			   SNDRV_PCM_FMTBIT_S32_LE)
132 struct mca_data *host;
133 struct device *pd_dev;
134 struct clk *clk_parent;
135 struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1];
137 bool port_started[SNDRV_PCM_STREAM_LAST + 1];
138 int port_driver; /* The cluster driving this cluster's port */
140 bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1];
141 struct device_link *pd_link;
143 unsigned int bclk_ratio;
145 /* Masks etc. picked up via the set_tdm_slot method */
148 unsigned int tdm_tx_mask;
149 unsigned int tdm_rx_mask;
155 __iomem void *switch_base;
157 struct device *pd_dev;
158 struct reset_control *rstc;
159 struct device_link *pd_link;
161 /* Mutex for accessing port_driver of foreign clusters */
162 struct mutex port_mutex;
165 struct mca_cluster clusters[];
168 static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val)
170 __iomem void *ptr = cl->base + regoffset;
173 newval = (val & mask) | (readl_relaxed(ptr) & ~mask);
174 writel_relaxed(newval, ptr);
178 * Get the cluster of FE or BE DAI
180 static struct mca_cluster *mca_dai_to_cluster(struct snd_soc_dai *dai)
182 struct mca_data *mca = snd_soc_dai_get_drvdata(dai);
184 * FE DAIs are 0 ... nclusters - 1
185 * BE DAIs are nclusters ... 2*nclusters - 1
187 int cluster_no = dai->id % mca->nclusters;
189 return &mca->clusters[cluster_no];
192 /* called before PCM trigger */
193 static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
194 struct snd_soc_dai *dai)
196 struct mca_cluster *cl = mca_dai_to_cluster(dai);
197 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
198 int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
200 serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF);
203 case SNDRV_PCM_TRIGGER_START:
204 case SNDRV_PCM_TRIGGER_RESUME:
205 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
206 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
207 SERDES_STATUS_EN | SERDES_STATUS_RST,
209 mca_modify(cl, serdes_conf, SERDES_CONF_SOME_RST,
210 SERDES_CONF_SOME_RST);
211 readl_relaxed(cl->base + serdes_conf);
212 mca_modify(cl, serdes_conf, SERDES_STATUS_RST, 0);
213 WARN_ON(readl_relaxed(cl->base + REG_SERDES_STATUS) &
221 static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd,
222 struct snd_soc_dai *dai)
224 struct mca_cluster *cl = mca_dai_to_cluster(dai);
225 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
226 int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
229 case SNDRV_PCM_TRIGGER_START:
230 case SNDRV_PCM_TRIGGER_RESUME:
231 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
232 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
233 SERDES_STATUS_EN | SERDES_STATUS_RST,
237 case SNDRV_PCM_TRIGGER_STOP:
238 case SNDRV_PCM_TRIGGER_SUSPEND:
239 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
240 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
241 SERDES_STATUS_EN, 0);
251 static int mca_fe_enable_clocks(struct mca_cluster *cl)
253 struct mca_data *mca = cl->host;
256 ret = clk_prepare_enable(cl->clk_parent);
259 "cluster %d: unable to enable clock parent: %d\n",
265 * We can't power up the device earlier than this because
266 * the power state driver would error out on seeing the device
269 cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
270 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
274 "cluster %d: unable to prop-up power domain\n", cl->no);
275 clk_disable_unprepare(cl->clk_parent);
279 writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
280 mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
282 mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN);
287 static void mca_fe_disable_clocks(struct mca_cluster *cl)
289 mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
290 mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);
292 device_link_del(cl->pd_link);
293 clk_disable_unprepare(cl->clk_parent);
296 static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
298 struct mca_data *mca = cl->host;
299 struct mca_cluster *be_cl;
302 mutex_lock(&mca->port_mutex);
303 for (i = 0; i < mca->nclusters; i++) {
304 be_cl = &mca->clusters[i];
306 if (be_cl->port_driver != cl->no)
309 for_each_pcm_streams(stream) {
310 if (be_cl->clocks_in_use[stream]) {
311 mutex_unlock(&mca->port_mutex);
316 mutex_unlock(&mca->port_mutex);
320 static int mca_be_prepare(struct snd_pcm_substream *substream,
321 struct snd_soc_dai *dai)
323 struct mca_cluster *cl = mca_dai_to_cluster(dai);
324 struct mca_data *mca = cl->host;
325 struct mca_cluster *fe_cl;
328 if (cl->port_driver < 0)
331 fe_cl = &mca->clusters[cl->port_driver];
334 * Typically the CODECs we are paired with will require clocks
335 * to be present at time of unmute with the 'mute_stream' op
336 * or at time of DAPM widget power-up. We need to enable clocks
337 * here at the latest (frontend prepare would be too late).
339 if (!mca_fe_clocks_in_use(fe_cl)) {
340 ret = mca_fe_enable_clocks(fe_cl);
345 cl->clocks_in_use[substream->stream] = true;
350 static int mca_be_hw_free(struct snd_pcm_substream *substream,
351 struct snd_soc_dai *dai)
353 struct mca_cluster *cl = mca_dai_to_cluster(dai);
354 struct mca_data *mca = cl->host;
355 struct mca_cluster *fe_cl;
357 if (cl->port_driver < 0)
361 * We are operating on a foreign cluster here, but since we
362 * belong to the same PCM, accesses should have been
363 * synchronized at ASoC level.
365 fe_cl = &mca->clusters[cl->port_driver];
366 if (!mca_fe_clocks_in_use(fe_cl))
367 return 0; /* Nothing to do */
369 cl->clocks_in_use[substream->stream] = false;
371 if (!mca_fe_clocks_in_use(fe_cl))
372 mca_fe_disable_clocks(fe_cl);
/* Drop the highest set bits of 'mask' until at most 'nchans' bits remain */
static unsigned int mca_crop_mask(unsigned int mask, int nchans)
{
	while (hweight32(mask) > nchans)
		mask &= ~(1 << __fls(mask));

	return mask;
}
385 static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
386 unsigned int mask, int slots, int nchans,
387 int slot_width, bool is_tx, int port)
389 __iomem void *serdes_base = cl->base + serdes_unit;
390 u32 serdes_conf, serdes_conf_mask;
392 serdes_conf_mask = SERDES_CONF_WIDTH_MASK | SERDES_CONF_NCHANS;
393 serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1);
394 switch (slot_width) {
396 serdes_conf |= SERDES_CONF_WIDTH_16BIT;
399 serdes_conf |= SERDES_CONF_WIDTH_20BIT;
402 serdes_conf |= SERDES_CONF_WIDTH_24BIT;
405 serdes_conf |= SERDES_CONF_WIDTH_32BIT;
411 serdes_conf_mask |= SERDES_CONF_SYNC_SEL;
412 serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);
415 serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
417 serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
420 serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
422 SERDES_CONF_NO_DATA_FEEDBACK;
423 serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
424 SERDES_CONF_NO_DATA_FEEDBACK;
429 (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF),
430 serdes_conf_mask, serdes_conf);
433 writel_relaxed(0xffffffff,
434 serdes_base + REG_TX_SERDES_SLOTMASK);
435 writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
436 serdes_base + REG_TX_SERDES_SLOTMASK + 0x4);
437 writel_relaxed(0xffffffff,
438 serdes_base + REG_TX_SERDES_SLOTMASK + 0x8);
439 writel_relaxed(~((u32)mask),
440 serdes_base + REG_TX_SERDES_SLOTMASK + 0xc);
442 writel_relaxed(0xffffffff,
443 serdes_base + REG_RX_SERDES_SLOTMASK);
444 writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
445 serdes_base + REG_RX_SERDES_SLOTMASK + 0x4);
446 writel_relaxed(1 << port,
447 serdes_base + REG_RX_SERDES_PORT);
453 dev_err(cl->host->dev,
454 "unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n",
455 mask, slots, slot_width);
459 static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
460 unsigned int rx_mask, int slots, int slot_width)
462 struct mca_cluster *cl = mca_dai_to_cluster(dai);
464 cl->tdm_slots = slots;
465 cl->tdm_slot_width = slot_width;
466 cl->tdm_tx_mask = tx_mask;
467 cl->tdm_rx_mask = rx_mask;
472 static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
474 struct mca_cluster *cl = mca_dai_to_cluster(dai);
475 struct mca_data *mca = cl->host;
476 bool fpol_inv = false;
480 if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) !=
481 SND_SOC_DAIFMT_BP_FP)
484 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
485 case SND_SOC_DAIFMT_I2S:
489 case SND_SOC_DAIFMT_LEFT_J:
497 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
498 case SND_SOC_DAIFMT_NB_IF:
499 case SND_SOC_DAIFMT_IB_IF:
504 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
505 case SND_SOC_DAIFMT_NB_NF:
506 case SND_SOC_DAIFMT_NB_IF:
507 serdes_conf |= SERDES_CONF_BCLK_POL;
514 mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF,
515 SERDES_CONF_BCLK_POL, serdes_conf);
516 mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF,
517 SERDES_CONF_BCLK_POL, serdes_conf);
518 writel_relaxed(bitstart,
519 cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
520 writel_relaxed(bitstart,
521 cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);
526 dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt);
530 static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
532 struct mca_cluster *cl = mca_dai_to_cluster(dai);
534 cl->bclk_ratio = ratio;
539 static int mca_fe_get_port(struct snd_pcm_substream *substream)
541 struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
542 struct snd_soc_pcm_runtime *be;
543 struct snd_soc_dpcm *dpcm;
546 for_each_dpcm_be(fe, substream->stream, dpcm) {
554 return mca_dai_to_cluster(asoc_rtd_to_cpu(be, 0))->no;
557 static int mca_fe_hw_params(struct snd_pcm_substream *substream,
558 struct snd_pcm_hw_params *params,
559 struct snd_soc_dai *dai)
561 struct mca_cluster *cl = mca_dai_to_cluster(dai);
562 struct mca_data *mca = cl->host;
563 struct device *dev = mca->dev;
564 unsigned int samp_rate = params_rate(params);
565 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
566 bool refine_tdm = false;
567 unsigned long bclk_ratio;
568 unsigned int tdm_slots, tdm_slot_width, tdm_mask;
570 int ret, port, nchans_ceiled;
572 if (!cl->tdm_slot_width) {
574 * We were not given TDM settings from above, set initial
575 * guesses which will later be refined.
577 tdm_slot_width = params_width(params);
578 tdm_slots = params_channels(params);
581 tdm_slot_width = cl->tdm_slot_width;
582 tdm_slots = cl->tdm_slots;
583 tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
587 bclk_ratio = cl->bclk_ratio;
589 bclk_ratio = tdm_slot_width * tdm_slots;
592 int nchannels = params_channels(params);
595 dev_err(dev, "missing TDM for stream with two or more channels\n");
599 if ((bclk_ratio % nchannels) != 0) {
600 dev_err(dev, "BCLK ratio (%ld) not divisible by no. of channels (%d)\n",
601 bclk_ratio, nchannels);
605 tdm_slot_width = bclk_ratio / nchannels;
607 if (tdm_slot_width > 32 && nchannels == 1)
610 if (tdm_slot_width < params_width(params)) {
611 dev_err(dev, "TDM slots too narrow (tdm=%d params=%d)\n",
612 tdm_slot_width, params_width(params));
616 tdm_mask = (1 << tdm_slots) - 1;
619 port = mca_fe_get_port(substream);
623 ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
624 tdm_mask, tdm_slots, params_channels(params),
625 tdm_slot_width, is_tx, port);
629 pad = 32 - params_width(params);
632 * TODO: Here the register semantics aren't clear.
634 nchans_ceiled = min_t(int, params_channels(params), 4);
635 regval = FIELD_PREP(DMA_ADAPTER_NCHANS, nchans_ceiled) |
636 FIELD_PREP(DMA_ADAPTER_TX_NCHANS, 0x2) |
637 FIELD_PREP(DMA_ADAPTER_RX_NCHANS, 0x2) |
638 FIELD_PREP(DMA_ADAPTER_TX_LSB_PAD, pad) |
639 FIELD_PREP(DMA_ADAPTER_RX_MSB_PAD, pad);
641 #ifndef USE_RXB_FOR_CAPTURE
642 writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
645 writel_relaxed(regval,
646 mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
648 writel_relaxed(regval,
649 mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
652 if (!mca_fe_clocks_in_use(cl)) {
654 * Set up FSYNC duty cycle as even as possible.
656 writel_relaxed((bclk_ratio / 2) - 1,
657 cl->base + REG_SYNCGEN_HI_PERIOD);
658 writel_relaxed(((bclk_ratio + 1) / 2) - 1,
659 cl->base + REG_SYNCGEN_LO_PERIOD);
660 writel_relaxed(FIELD_PREP(MCLK_CONF_DIV, 0x1),
661 cl->base + REG_MCLK_CONF);
663 ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
665 dev_err(mca->dev, "cluster %d: unable to set clock parent: %d\n",
674 static const struct snd_soc_dai_ops mca_fe_ops = {
675 .set_fmt = mca_fe_set_fmt,
676 .set_bclk_ratio = mca_set_bclk_ratio,
677 .set_tdm_slot = mca_fe_set_tdm_slot,
678 .hw_params = mca_fe_hw_params,
679 .trigger = mca_fe_trigger,
682 static bool mca_be_started(struct mca_cluster *cl)
686 for_each_pcm_streams(stream)
687 if (cl->port_started[stream])
692 static int mca_be_startup(struct snd_pcm_substream *substream,
693 struct snd_soc_dai *dai)
695 struct snd_soc_pcm_runtime *be = asoc_substream_to_rtd(substream);
696 struct snd_soc_pcm_runtime *fe;
697 struct mca_cluster *cl = mca_dai_to_cluster(dai);
698 struct mca_cluster *fe_cl;
699 struct mca_data *mca = cl->host;
700 struct snd_soc_dpcm *dpcm;
704 for_each_dpcm_fe(be, substream->stream, dpcm) {
705 if (fe && dpcm->fe != fe) {
706 dev_err(mca->dev, "many FE per one BE unsupported\n");
716 fe_cl = mca_dai_to_cluster(asoc_rtd_to_cpu(fe, 0));
718 if (mca_be_started(cl)) {
720 * Port is already started in the other direction.
721 * Make sure there isn't a conflict with another cluster
724 if (cl->port_driver != fe_cl->no)
727 cl->port_started[substream->stream] = true;
731 writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA,
732 cl->base + REG_PORT_ENABLES);
733 writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
734 cl->base + REG_PORT_CLOCK_SEL);
735 writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
736 cl->base + REG_PORT_DATA_SEL);
737 mutex_lock(&mca->port_mutex);
738 cl->port_driver = fe_cl->no;
739 mutex_unlock(&mca->port_mutex);
740 cl->port_started[substream->stream] = true;
745 static void mca_be_shutdown(struct snd_pcm_substream *substream,
746 struct snd_soc_dai *dai)
748 struct mca_cluster *cl = mca_dai_to_cluster(dai);
749 struct mca_data *mca = cl->host;
751 cl->port_started[substream->stream] = false;
753 if (!mca_be_started(cl)) {
755 * Were we the last direction to shutdown?
756 * Turn off the lights.
758 writel_relaxed(0, cl->base + REG_PORT_ENABLES);
759 writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
760 mutex_lock(&mca->port_mutex);
761 cl->port_driver = -1;
762 mutex_unlock(&mca->port_mutex);
766 static const struct snd_soc_dai_ops mca_be_ops = {
767 .prepare = mca_be_prepare,
768 .hw_free = mca_be_hw_free,
769 .startup = mca_be_startup,
770 .shutdown = mca_be_shutdown,
773 static int mca_set_runtime_hwparams(struct snd_soc_component *component,
774 struct snd_pcm_substream *substream,
775 struct dma_chan *chan)
777 struct device *dma_dev = chan->device->dev;
778 struct snd_dmaengine_dai_dma_data dma_data = {};
781 struct snd_pcm_hardware hw;
783 memset(&hw, 0, sizeof(hw));
785 hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
786 SNDRV_PCM_INFO_INTERLEAVED;
788 hw.periods_max = UINT_MAX;
789 hw.period_bytes_min = 256;
790 hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
791 hw.buffer_bytes_max = SIZE_MAX;
794 ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data,
800 return snd_soc_set_runtime_hwparams(substream, &hw);
803 static int mca_pcm_open(struct snd_soc_component *component,
804 struct snd_pcm_substream *substream)
806 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
807 struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
808 struct dma_chan *chan = cl->dma_chans[substream->stream];
811 if (rtd->dai_link->no_pcm)
814 ret = mca_set_runtime_hwparams(component, substream, chan);
818 return snd_dmaengine_pcm_open(substream, chan);
821 static int mca_hw_params(struct snd_soc_component *component,
822 struct snd_pcm_substream *substream,
823 struct snd_pcm_hw_params *params)
825 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
826 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
827 struct dma_slave_config slave_config;
830 if (rtd->dai_link->no_pcm)
833 memset(&slave_config, 0, sizeof(slave_config));
834 ret = snd_hwparams_to_dma_slave_config(substream, params,
839 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
840 slave_config.dst_port_window_size =
841 min_t(u32, params_channels(params), 4);
843 slave_config.src_port_window_size =
844 min_t(u32, params_channels(params), 4);
846 return dmaengine_slave_config(chan, &slave_config);
849 static int mca_close(struct snd_soc_component *component,
850 struct snd_pcm_substream *substream)
852 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
854 if (rtd->dai_link->no_pcm)
857 return snd_dmaengine_pcm_close(substream);
860 static int mca_trigger(struct snd_soc_component *component,
861 struct snd_pcm_substream *substream, int cmd)
863 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
865 if (rtd->dai_link->no_pcm)
869 * Before we do the PCM trigger proper, insert an opportunity
870 * to reset the frontend's SERDES.
872 mca_fe_early_trigger(substream, cmd, asoc_rtd_to_cpu(rtd, 0));
874 return snd_dmaengine_pcm_trigger(substream, cmd);
877 static snd_pcm_uframes_t mca_pointer(struct snd_soc_component *component,
878 struct snd_pcm_substream *substream)
880 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
882 if (rtd->dai_link->no_pcm)
885 return snd_dmaengine_pcm_pointer(substream);
888 static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl, unsigned int stream)
890 bool is_tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
891 #ifndef USE_RXB_FOR_CAPTURE
892 char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
893 is_tx ? "tx%da" : "rx%da", cl->no);
895 char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
896 is_tx ? "tx%da" : "rx%db", cl->no);
898 return of_dma_request_slave_channel(cl->host->dev->of_node, name);
902 static void mca_pcm_free(struct snd_soc_component *component,
905 struct snd_soc_pcm_runtime *rtd = snd_pcm_chip(pcm);
906 struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
909 if (rtd->dai_link->no_pcm)
912 for_each_pcm_streams(i) {
913 struct snd_pcm_substream *substream =
914 rtd->pcm->streams[i].substream;
916 if (!substream || !cl->dma_chans[i])
919 dma_release_channel(cl->dma_chans[i]);
920 cl->dma_chans[i] = NULL;
925 static int mca_pcm_new(struct snd_soc_component *component,
926 struct snd_soc_pcm_runtime *rtd)
928 struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
931 if (rtd->dai_link->no_pcm)
934 for_each_pcm_streams(i) {
935 struct snd_pcm_substream *substream =
936 rtd->pcm->streams[i].substream;
937 struct dma_chan *chan;
942 chan = mca_request_dma_channel(cl, i);
944 if (IS_ERR_OR_NULL(chan)) {
945 dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
947 mca_pcm_free(component, rtd->pcm);
951 cl->dma_chans[i] = chan;
952 snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM,
953 chan->device->dev, 512 * 1024 * 6,
960 static const struct snd_soc_component_driver mca_component = {
962 .open = mca_pcm_open,
964 .hw_params = mca_hw_params,
965 .trigger = mca_trigger,
966 .pointer = mca_pointer,
967 .pcm_construct = mca_pcm_new,
968 .pcm_destruct = mca_pcm_free,
971 static void apple_mca_release(struct mca_data *mca)
975 for (i = 0; i < mca->nclusters; i++) {
976 struct mca_cluster *cl = &mca->clusters[i];
978 if (!IS_ERR_OR_NULL(cl->clk_parent))
979 clk_put(cl->clk_parent);
981 if (!IS_ERR_OR_NULL(cl->pd_dev))
982 dev_pm_domain_detach(cl->pd_dev, true);
986 device_link_del(mca->pd_link);
988 if (!IS_ERR_OR_NULL(mca->pd_dev))
989 dev_pm_domain_detach(mca->pd_dev, true);
991 reset_control_rearm(mca->rstc);
994 static int apple_mca_probe(struct platform_device *pdev)
996 struct mca_data *mca;
997 struct mca_cluster *clusters;
998 struct snd_soc_dai_driver *dai_drivers;
999 struct resource *res;
1004 base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1006 return PTR_ERR(base);
1008 if (resource_size(res) < CLUSTER_STRIDE)
1010 nclusters = (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1;
1012 mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters),
1016 mca->dev = &pdev->dev;
1017 mca->nclusters = nclusters;
1018 mutex_init(&mca->port_mutex);
1019 platform_set_drvdata(pdev, mca);
1020 clusters = mca->clusters;
1023 devm_platform_ioremap_resource(pdev, 1);
1024 if (IS_ERR(mca->switch_base))
1025 return PTR_ERR(mca->switch_base);
1027 mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
1028 if (IS_ERR(mca->rstc))
1029 return PTR_ERR(mca->rstc);
1031 dai_drivers = devm_kzalloc(
1032 &pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL);
1036 mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0);
1037 if (IS_ERR(mca->pd_dev))
1040 mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev,
1041 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
1042 DL_FLAG_RPM_ACTIVE);
1043 if (!mca->pd_link) {
1045 /* Prevent an unbalanced reset rearm */
1050 reset_control_reset(mca->rstc);
1052 for (i = 0; i < nclusters; i++) {
1053 struct mca_cluster *cl = &clusters[i];
1054 struct snd_soc_dai_driver *fe =
1055 &dai_drivers[mca->nclusters + i];
1056 struct snd_soc_dai_driver *be = &dai_drivers[i];
1060 cl->base = base + CLUSTER_STRIDE * i;
1061 cl->port_driver = -1;
1062 cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
1063 if (IS_ERR(cl->clk_parent)) {
1064 dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
1065 i, PTR_ERR(cl->clk_parent));
1066 ret = PTR_ERR(cl->clk_parent);
1069 cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
1070 if (IS_ERR(cl->pd_dev)) {
1072 "unable to obtain cluster %d PD: %ld\n", i,
1073 PTR_ERR(cl->pd_dev));
1074 ret = PTR_ERR(cl->pd_dev);
1080 devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i);
1085 fe->ops = &mca_fe_ops;
1086 fe->playback.channels_min = 1;
1087 fe->playback.channels_max = 32;
1088 fe->playback.rates = SNDRV_PCM_RATE_8000_192000;
1089 fe->playback.formats = APPLE_MCA_FMTBITS;
1090 fe->capture.channels_min = 1;
1091 fe->capture.channels_max = 32;
1092 fe->capture.rates = SNDRV_PCM_RATE_8000_192000;
1093 fe->capture.formats = APPLE_MCA_FMTBITS;
1094 fe->symmetric_rate = 1;
1096 fe->playback.stream_name =
1097 devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i);
1098 fe->capture.stream_name =
1099 devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i);
1101 if (!fe->playback.stream_name || !fe->capture.stream_name) {
1106 be->id = i + nclusters;
1107 be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-i2s-%d", i);
1112 be->ops = &mca_be_ops;
1113 be->playback.channels_min = 1;
1114 be->playback.channels_max = 32;
1115 be->playback.rates = SNDRV_PCM_RATE_8000_192000;
1116 be->playback.formats = APPLE_MCA_FMTBITS;
1117 be->capture.channels_min = 1;
1118 be->capture.channels_max = 32;
1119 be->capture.rates = SNDRV_PCM_RATE_8000_192000;
1120 be->capture.formats = APPLE_MCA_FMTBITS;
1122 be->playback.stream_name =
1123 devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i);
1124 be->capture.stream_name =
1125 devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i);
1126 if (!be->playback.stream_name || !be->capture.stream_name) {
1132 ret = snd_soc_register_component(&pdev->dev, &mca_component,
1133 dai_drivers, nclusters * 2);
1135 dev_err(&pdev->dev, "unable to register ASoC component: %d\n",
1143 apple_mca_release(mca);
1147 static int apple_mca_remove(struct platform_device *pdev)
1149 struct mca_data *mca = platform_get_drvdata(pdev);
1151 snd_soc_unregister_component(&pdev->dev);
1152 apple_mca_release(mca);
1156 static const struct of_device_id apple_mca_of_match[] = {
1157 { .compatible = "apple,mca", },
1160 MODULE_DEVICE_TABLE(of, apple_mca_of_match);
1162 static struct platform_driver apple_mca_driver = {
1164 .name = "apple-mca",
1165 .of_match_table = apple_mca_of_match,
1167 .probe = apple_mca_probe,
1168 .remove = apple_mca_remove,
1170 module_platform_driver(apple_mca_driver);
/* Module metadata */
MODULE_AUTHOR("Martin PoviĊĦer <povik+lin@cutebit.org>");
MODULE_DESCRIPTION("ASoC Apple MCA driver");
MODULE_LICENSE("GPL");