// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Audio DMA Controller (ADMAC) on t8103 (M1) and other Apple chips
 *
 * Copyright (C) The Asahi Linux Contributors
 */
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#define NCHANNELS_MAX	64
#define IRQ_NOUTPUTS	4

#define RING_WRITE_SLOT		GENMASK(1, 0)
#define RING_READ_SLOT		GENMASK(5, 4)
#define RING_FULL		BIT(9)
#define RING_EMPTY		BIT(8)
#define RING_ERR		BIT(10)

#define STATUS_DESC_DONE	BIT(0)
#define STATUS_ERR		BIT(6)
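/*
 * Flags word written with each descriptor. Setting it appears to make the
 * controller post a report (and raise STATUS_DESC_DONE) when the descriptor
 * completes; the driver sets it on every descriptor it writes.
 */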
#define FLAG_DESC_NOTIFY	BIT(16)
#define REG_TX_START		0x0000
#define REG_TX_STOP		0x0004
#define REG_RX_START		0x0008
#define REG_RX_STOP		0x000c

#define REG_CHAN_CTL(ch)	(0x8000 + (ch) * 0x200)
#define REG_CHAN_CTL_RST_RINGS	BIT(0)

#define REG_DESC_RING(ch)	(0x8070 + (ch) * 0x200)
#define REG_REPORT_RING(ch)	(0x8074 + (ch) * 0x200)

#define REG_RESIDUE(ch)		(0x8064 + (ch) * 0x200)

#define REG_BUS_WIDTH(ch)	(0x8040 + (ch) * 0x200)

#define BUS_WIDTH_8BIT		0x00
#define BUS_WIDTH_16BIT		0x01
#define BUS_WIDTH_32BIT		0x02
#define BUS_WIDTH_FRAME_2_WORDS	0x10
#define BUS_WIDTH_FRAME_4_WORDS	0x20

#define CHAN_BUFSIZE		0x8000

#define REG_CHAN_FIFOCTL(ch)	(0x8054 + (ch) * 0x200)
#define CHAN_FIFOCTL_LIMIT	GENMASK(31, 16)
#define CHAN_FIFOCTL_THRESHOLD	GENMASK(15, 0)
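/*
 * Descriptor-write and report-read FIFO registers: even (TX) and odd (RX)
 * channels live in separate 0x4000-sized banks, with one 32-bit FIFO
 * register per channel pair, hence the address arithmetic below.
 */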
#define REG_DESC_WRITE(ch)	(0x10000 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
#define REG_REPORT_READ(ch)	(0x10100 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
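/*
 * Interrupt state and per-channel status/mask registers are banked by IRQ
 * output: 'idx' selects which of the controller's IRQ_NOUTPUTS lines is
 * being inspected or configured.
 */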
#define REG_TX_INTSTATE(idx)		(0x0030 + (idx) * 4)
#define REG_RX_INTSTATE(idx)		(0x0040 + (idx) * 4)
#define REG_CHAN_INTSTATUS(ch, idx)	(0x8010 + (ch) * 0x200 + (idx) * 4)
#define REG_CHAN_INTMASK(ch, idx)	(0x8020 + (ch) * 0x200 + (idx) * 4)
struct admac_chan {
	unsigned int no;
	struct admac_data *host;
	struct dma_chan chan;
	struct tasklet_struct tasklet;

	spinlock_t lock;
	struct admac_tx *current_tx;
	int nperiod_acks;

	/*
	 * We maintain a 'submitted' and 'issued' list mainly for interface
	 * correctness. Typical use of the driver (per channel) will be
	 * prepping, submitting and issuing a single cyclic transaction which
	 * will stay current until terminate_all is called.
	 */
	struct list_head submitted;
	struct list_head issued;

	struct list_head to_free;
};
struct admac_data {
	struct dma_device dma;
	struct device *dev;
	void __iomem *base;

	int irq_index;
	int nchannels;
	struct admac_chan channels[];
};
struct admac_tx {
	struct dma_async_tx_descriptor tx;
	dma_addr_t buf_addr;
	dma_addr_t buf_end;
	size_t buf_len;
	size_t period_len;

	/* Both positions are tracked modulo 2 * buf_len */
	size_t submitted_pos;
	size_t reclaimed_pos;

	struct list_head node;
};
static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
{
	void __iomem *addr = ad->base + reg;
	u32 curr = readl_relaxed(addr);

	writel_relaxed((curr & ~mask) | (val & mask), addr);
}
static struct admac_chan *to_admac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct admac_chan, chan);
}
static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct admac_tx, tx);
}
static enum dma_transfer_direction admac_chan_direction(int channo)
{
	/* Channel directions are hardwired: even channels are TX (mem-to-dev), odd are RX (dev-to-mem) */
	return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
}
static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct admac_tx *adtx = to_admac_tx(tx);
	struct admac_chan *adchan = to_admac_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&adchan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&adtx->node, &adchan->submitted);
	spin_unlock_irqrestore(&adchan->lock, flags);

	return cookie;
}
static int admac_desc_free(struct dma_async_tx_descriptor *tx)
{
	kfree(to_admac_tx(tx));

	return 0;
}
static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct admac_chan *adchan = container_of(chan, struct admac_chan, chan);
	struct admac_tx *adtx;

	if (direction != admac_chan_direction(adchan->no))
		return NULL;

	adtx = kzalloc(sizeof(*adtx), GFP_NOWAIT);
	if (!adtx)
		return NULL;

	adtx->buf_addr = buf_addr;
	adtx->buf_len = buf_len;
	adtx->buf_end = buf_addr + buf_len;
	adtx->period_len = period_len;

	adtx->submitted_pos = 0;
	adtx->reclaimed_pos = 0;

	dma_async_tx_descriptor_init(&adtx->tx, chan);
	adtx->tx.tx_submit = admac_tx_submit;
	adtx->tx.desc_free = admac_desc_free;

	return &adtx->tx;
}
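/*
 * A minimal sketch of client usage, for illustration only ('dev', the buffer
 * and the callback are assumed to be set up by the client):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	tx->callback = period_done_cb;	(hypothetical callback)
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */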
/*
 * Write one hardware descriptor for a dmaengine cyclic transaction.
 */
static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo,
					struct admac_tx *tx)
{
	dma_addr_t addr;

	addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len);

	/* If this happens, we have buggy code somewhere */
	WARN_ON_ONCE(addr + tx->period_len > tx->buf_end);

	dev_dbg(ad->dev, "ch%d descriptor: addr=0x%pad len=0x%zx flags=0x%lx\n",
		channo, &addr, tx->period_len, FLAG_DESC_NOTIFY);

	writel_relaxed(lower_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(upper_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(tx->period_len,      ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(FLAG_DESC_NOTIFY,    ad->base + REG_DESC_WRITE(channo));

	/* Tracked modulo 2 * buf_len, in step with reclaimed_pos */
	tx->submitted_pos += tx->period_len;
	tx->submitted_pos %= 2 * tx->buf_len;
}
/*
 * Write as many hardware descriptors of a dmaengine cyclic transaction
 * as the descriptor ring has space for.
 */
static void admac_cyclic_write_desc(struct admac_data *ad, int channo,
				    struct admac_tx *tx)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_FULL)
			break;
		admac_cyclic_write_one_desc(ad, channo, tx);
	}
}
static int admac_ring_noccupied_slots(int ringval)
{
	int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval);
	int rdslot = FIELD_GET(RING_READ_SLOT, ringval);

	if (wrslot != rdslot) {
		return (wrslot + 4 - rdslot) % 4;
	} else {
		WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0);

		if (ringval & RING_FULL)
			return 4;
		else
			return 0;
	}
}
/*
 * Read from hardware the residue of a cyclic dmaengine transaction.
 */
static u32 admac_cyclic_read_residue(struct admac_data *ad, int channo,
				     struct admac_tx *adtx)
{
	u32 ring1, ring2;
	u32 residue1, residue2;
	int nreports;
	size_t pos;

	ring1    = readl_relaxed(ad->base + REG_REPORT_RING(channo));
	residue1 = readl_relaxed(ad->base + REG_RESIDUE(channo));
	ring2    = readl_relaxed(ad->base + REG_REPORT_RING(channo));
	residue2 = readl_relaxed(ad->base + REG_RESIDUE(channo));

	if (residue2 > residue1) {
		/*
		 * Controller must have loaded the next descriptor between
		 * the two residue reads
		 */
		nreports = admac_ring_noccupied_slots(ring1) + 1;
	} else {
		/* No descriptor load between the two reads, ring2 is safe to use */
		nreports = admac_ring_noccupied_slots(ring2);
	}

	pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) - residue2;

	return adtx->buf_len - pos % adtx->buf_len;
}
static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	struct admac_tx *adtx;
	enum dma_status ret;
	size_t residue;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&adchan->lock, flags);
	adtx = adchan->current_tx;

	if (adtx && adtx->tx.cookie == cookie) {
		ret = DMA_IN_PROGRESS;
		residue = admac_cyclic_read_residue(ad, adchan->no, adtx);
	} else {
		ret = DMA_IN_PROGRESS;
		residue = 0;
		list_for_each_entry(adtx, &adchan->issued, node) {
			if (adtx->tx.cookie == cookie) {
				residue = adtx->buf_len;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&adchan->lock, flags);

	dma_set_residue(txstate, residue);
	return ret;
}
static void admac_start_chan(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	u32 startbit = 1 << (adchan->no / 2);

	writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
		       ad->base + REG_CHAN_INTSTATUS(adchan->no, ad->irq_index));
	writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
		       ad->base + REG_CHAN_INTMASK(adchan->no, ad->irq_index));

	switch (admac_chan_direction(adchan->no)) {
	case DMA_MEM_TO_DEV:
		writel_relaxed(startbit, ad->base + REG_TX_START);
		break;
	case DMA_DEV_TO_MEM:
		writel_relaxed(startbit, ad->base + REG_RX_START);
		break;
	default:
		break;
	}
	dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no);
}
static void admac_stop_chan(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	u32 stopbit = 1 << (adchan->no / 2);

	switch (admac_chan_direction(adchan->no)) {
	case DMA_MEM_TO_DEV:
		writel_relaxed(stopbit, ad->base + REG_TX_STOP);
		break;
	case DMA_DEV_TO_MEM:
		writel_relaxed(stopbit, ad->base + REG_RX_STOP);
		break;
	default:
		break;
	}
	dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no);
}
static void admac_reset_rings(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;

	writel_relaxed(REG_CHAN_CTL_RST_RINGS,
		       ad->base + REG_CHAN_CTL(adchan->no));
	writel_relaxed(0, ad->base + REG_CHAN_CTL(adchan->no));
}
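/*
 * Kick off the current transaction: reset the rings, prime a first
 * descriptor, start the channel, then top up the descriptor ring.
 */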
static void admac_start_current_tx(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	int ch = adchan->no;

	admac_reset_rings(adchan);
	writel_relaxed(0, ad->base + REG_CHAN_CTL(ch));

	admac_cyclic_write_one_desc(ad, ch, adchan->current_tx);
	admac_start_chan(adchan);
	admac_cyclic_write_desc(ad, ch, adchan->current_tx);
}
static void admac_issue_pending(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *tx;
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	list_splice_tail_init(&adchan->submitted, &adchan->issued);
	if (!list_empty(&adchan->issued) && !adchan->current_tx) {
		tx = list_first_entry(&adchan->issued, struct admac_tx, node);
		list_del(&tx->node);

		adchan->current_tx = tx;
		adchan->nperiod_acks = 0;
		admac_start_current_tx(adchan);
	}
	spin_unlock_irqrestore(&adchan->lock, flags);
}
static int admac_pause(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_stop_chan(adchan);

	return 0;
}
static int admac_resume(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_start_chan(adchan);

	return 0;
}
static int admac_terminate_all(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	admac_stop_chan(adchan);
	admac_reset_rings(adchan);

	adchan->current_tx = NULL;
	/*
	 * Descriptors can only be freed after the tasklet
	 * has been killed (in admac_synchronize).
	 */
	list_splice_tail_init(&adchan->submitted, &adchan->to_free);
	list_splice_tail_init(&adchan->issued, &adchan->to_free);
	spin_unlock_irqrestore(&adchan->lock, flags);

	return 0;
}
static void admac_synchronize(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *adtx, *_adtx;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	list_splice_tail_init(&adchan->to_free, &head);
	spin_unlock_irqrestore(&adchan->lock, flags);

	tasklet_kill(&adchan->tasklet);

	list_for_each_entry_safe(adtx, _adtx, &head, node) {
		list_del(&adtx->node);
		admac_desc_free(&adtx->tx);
	}
}
static int admac_alloc_chan_resources(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	dma_cookie_init(&adchan->chan);
	return 0;
}
static void admac_free_chan_resources(struct dma_chan *chan)
{
	admac_terminate_all(chan);
	admac_synchronize(chan);
}
static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct admac_data *ad = (struct admac_data *) ofdma->of_dma_data;
	unsigned int index;

	if (dma_spec->args_count != 1)
		return NULL;

	index = dma_spec->args[0];

	if (index >= ad->nchannels) {
		dev_err(ad->dev, "channel index %u out of bounds\n", index);
		return NULL;
	}

	return &ad->channels[index].chan;
}
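/*
 * Illustrative device-tree consumer, matching the single-cell xlate above
 * (the cell is the channel index):
 *
 *	dmas = <&admac 0>;
 *	dma-names = "tx";
 */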
static int admac_drain_reports(struct admac_data *ad, int channo)
{
	int count;

	for (count = 0; count < 4; count++) {
		u32 countval_hi, countval_lo, unk1, flags;

		if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_EMPTY)
			break;

		/* A report appears to be four words: a 64-bit count, an unknown word, and flags */
		countval_lo = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		countval_hi = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		unk1        = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		flags       = readl_relaxed(ad->base + REG_REPORT_READ(channo));

		dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n",
			channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags);
	}

	return count;
}
static void admac_handle_status_err(struct admac_data *ad, int channo)
{
	bool handled = false;

	if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_ERR) {
		writel_relaxed(RING_ERR, ad->base + REG_DESC_RING(channo));
		dev_err_ratelimited(ad->dev, "ch%d descriptor ring error\n", channo);
		handled = true;
	}

	if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_ERR) {
		writel_relaxed(RING_ERR, ad->base + REG_REPORT_RING(channo));
		dev_err_ratelimited(ad->dev, "ch%d report ring error\n", channo);
		handled = true;
	}

	if (unlikely(!handled)) {
		dev_err(ad->dev, "ch%d unknown error, masking errors as cause of IRQs\n", channo);
		admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index),
			     STATUS_ERR, 0);
	}
}
static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
{
	struct admac_chan *adchan = &ad->channels[channo];
	unsigned long flags;
	int nreports;

	writel_relaxed(STATUS_DESC_DONE,
		       ad->base + REG_CHAN_INTSTATUS(channo, ad->irq_index));

	spin_lock_irqsave(&adchan->lock, flags);
	nreports = admac_drain_reports(ad, channo);

	if (adchan->current_tx) {
		struct admac_tx *tx = adchan->current_tx;

		adchan->nperiod_acks += nreports;
		tx->reclaimed_pos += nreports * tx->period_len;
		tx->reclaimed_pos %= 2 * tx->buf_len;

		admac_cyclic_write_desc(ad, channo, tx);
		tasklet_schedule(&adchan->tasklet);
	}
	spin_unlock_irqrestore(&adchan->lock, flags);
}
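/*
 * Note that client callbacks are not invoked from hard IRQ context; the
 * tasklet scheduled above runs them later, once per acknowledged period.
 */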
static void admac_handle_chan_int(struct admac_data *ad, int no)
{
	u32 cause = readl_relaxed(ad->base + REG_CHAN_INTSTATUS(no, ad->irq_index));

	if (cause & STATUS_ERR)
		admac_handle_status_err(ad, no);

	if (cause & STATUS_DESC_DONE)
		admac_handle_status_desc_done(ad, no);
}
static irqreturn_t admac_interrupt(int irq, void *devid)
{
	struct admac_data *ad = devid;
	u32 rx_intstate, tx_intstate;
	int i;

	rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
	tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));

	if (!tx_intstate && !rx_intstate)
		return IRQ_NONE;

	/* Even channels are TX, odd are RX; each INTSTATE bit covers one channel pair */
	for (i = 0; i < ad->nchannels; i += 2) {
		if (tx_intstate & 1)
			admac_handle_chan_int(ad, i);
		tx_intstate >>= 1;
	}

	for (i = 1; i < ad->nchannels; i += 2) {
		if (rx_intstate & 1)
			admac_handle_chan_int(ad, i);
		rx_intstate >>= 1;
	}

	return IRQ_HANDLED;
}
static void admac_chan_tasklet(struct tasklet_struct *t)
{
	struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
	struct admac_tx *adtx;
	struct dmaengine_desc_callback cb;
	struct dmaengine_result tx_result;
	int nacks;

	spin_lock_irq(&adchan->lock);
	adtx = adchan->current_tx;
	nacks = adchan->nperiod_acks;
	adchan->nperiod_acks = 0;
	spin_unlock_irq(&adchan->lock);

	if (!adtx || !nacks)
		return;

	tx_result.result = DMA_TRANS_NOERROR;
	tx_result.residue = 0;

	dmaengine_desc_get_callback(&adtx->tx, &cb);
	while (nacks--)
		dmaengine_desc_callback_invoke(&cb, &tx_result);
}
static int admac_device_config(struct dma_chan *chan,
			       struct dma_slave_config *config)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
	int wordsize = 0;
	u32 bus_width = 0;

	switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		wordsize = 1;
		bus_width |= BUS_WIDTH_8BIT;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		wordsize = 2;
		bus_width |= BUS_WIDTH_16BIT;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		wordsize = 4;
		bus_width |= BUS_WIDTH_32BIT;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * We take port_window_size to be the number of words in a frame.
	 *
	 * The controller has some means of out-of-band signalling, to the
	 * peripheral, of a word's position within a frame. That's where the
	 * importance of this control comes from.
	 */
	switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) {
	case 0 ... 1:
		break;
	case 2:
		bus_width |= BUS_WIDTH_FRAME_2_WORDS;
		break;
	case 4:
		bus_width |= BUS_WIDTH_FRAME_4_WORDS;
		break;
	default:
		return -EINVAL;
	}

	writel_relaxed(bus_width, ad->base + REG_BUS_WIDTH(adchan->no));

	/*
	 * By FIFOCTL_LIMIT we seem to set the maximal number of bytes allowed
	 * to be held in the controller's per-channel FIFO. Transfers seem to
	 * be triggered around the time FIFO occupancy touches
	 * FIFOCTL_THRESHOLD.
	 *
	 * The numbers we set are more or less arbitrary.
	 */
	writel_relaxed(FIELD_PREP(CHAN_FIFOCTL_LIMIT, 0x30 * wordsize)
		       | FIELD_PREP(CHAN_FIFOCTL_THRESHOLD, 0x18 * wordsize),
		       ad->base + REG_CHAN_FIFOCTL(adchan->no));

	return 0;
}
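/*
 * Illustrative slave configuration for a 16-bit stream framed as two words,
 * assuming a client-side 'chan' (names are the client's, not the driver's):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.dst_port_window_size = 2,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */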
static int admac_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct admac_data *ad;
	struct dma_device *dma;
	u32 nchannels;
	int err, irq, i;

	err = of_property_read_u32(np, "dma-channels", &nchannels);
	if (err || nchannels > NCHANNELS_MAX) {
		dev_err(&pdev->dev, "missing or invalid dma-channels property\n");
		return -EINVAL;
	}

	ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL);
	if (!ad)
		return -ENOMEM;

	platform_set_drvdata(pdev, ad);
	ad->dev = &pdev->dev;
	ad->nchannels = nchannels;

	/*
	 * The controller has 4 IRQ outputs. Try them all until
	 * we find one we can use.
	 */
	for (i = 0; i < IRQ_NOUTPUTS; i++) {
		irq = platform_get_irq_optional(pdev, i);
		if (irq >= 0) {
			ad->irq_index = i;
			break;
		}
	}

	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");

	err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
			       0, dev_name(&pdev->dev), ad);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "unable to register interrupt\n");

	ad->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ad->base))
		return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
				     "unable to obtain MMIO resource\n");
	dma = &ad->dma;

	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);

	dma->dev = &pdev->dev;
	dma->device_alloc_chan_resources = admac_alloc_chan_resources;
	dma->device_free_chan_resources = admac_free_chan_resources;
	dma->device_tx_status = admac_tx_status;
	dma->device_issue_pending = admac_issue_pending;
	dma->device_terminate_all = admac_terminate_all;
	dma->device_synchronize = admac_synchronize;
	dma->device_prep_dma_cyclic = admac_prep_dma_cyclic;
	dma->device_config = admac_device_config;
	dma->device_pause = admac_pause;
	dma->device_resume = admac_resume;

	dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	INIT_LIST_HEAD(&dma->channels);
	for (i = 0; i < nchannels; i++) {
		struct admac_chan *adchan = &ad->channels[i];

		adchan->host = ad;
		adchan->no = i;
		adchan->chan.device = &ad->dma;
		spin_lock_init(&adchan->lock);
		INIT_LIST_HEAD(&adchan->submitted);
		INIT_LIST_HEAD(&adchan->issued);
		INIT_LIST_HEAD(&adchan->to_free);
		list_add_tail(&adchan->chan.device_node, &dma->channels);
		tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
	}
	err = dma_async_device_register(&ad->dma);
	if (err)
		return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");

	err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
	if (err) {
		dma_async_device_unregister(&ad->dma);
		return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
	}

	return 0;
}
static int admac_remove(struct platform_device *pdev)
{
	struct admac_data *ad = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&ad->dma);

	return 0;
}
static const struct of_device_id admac_of_match[] = {
	{ .compatible = "apple,admac", },
	{ }
};
MODULE_DEVICE_TABLE(of, admac_of_match);
static struct platform_driver apple_admac_driver = {
	.driver = {
		.name = "apple-admac",
		.of_match_table = admac_of_match,
	},
	.probe = admac_probe,
	.remove = admac_remove,
};
module_platform_driver(apple_admac_driver);
816 MODULE_AUTHOR("Martin PoviĊĦer <povik+lin@cutebit.org>");
MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) on Apple SoCs");
MODULE_LICENSE("GPL");