+static struct dma_slave_config sa1100_irda_fir_rx = {
+	.direction	= DMA_DEV_TO_MEM,
+	.src_addr	= __PREG(Ser2HSDR),
+	.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.src_maxburst	= 8,
+};
+
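+/*
+ * FIR transmit: memory-to-device DMA into the HSSP data register
+ * (Ser2HSDR), one byte wide, bursting up to 8 bytes at a time.
+ */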
+static struct dma_slave_config sa1100_irda_fir_tx = {
+	.direction	= DMA_MEM_TO_DEV,
+	.dst_addr	= __PREG(Ser2HSDR),
+	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.dst_maxburst	= 8,
+};
+
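+/*
+ * Return the number of bytes transferred so far, computed from the
+ * descriptor residue.  The caller must have paused the channel first:
+ * only a paused transfer has a stable residue, so any other status
+ * reports zero bytes transferred.
+ */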
+static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf)
+{
+	struct dma_chan *chan = buf->chan;
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = dmaengine_tx_status(chan, buf->cookie, &state);
+	if (status != DMA_PAUSED)
+		return 0;
+
+	return sg_dma_len(&buf->sg) - state.residue;
+}
+
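+/*
+ * Acquire a DMA slave channel matching "name" via the sa11x0 filter
+ * function and apply the supplied slave configuration.  A failed
+ * slave_config is only warned about, since the channel may still be
+ * usable with its default configuration.
+ */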
+static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf,
+	const char *name, struct dma_slave_config *cfg)
+{
+	dma_cap_mask_t m;
+	int ret;
+
+	dma_cap_zero(m);
+	dma_cap_set(DMA_SLAVE, m);
+
+	buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
+	if (!buf->chan) {
+		dev_err(dev, "unable to request DMA channel for %s\n",
+			name);
+		return -ENOENT;
+	}
+
+	ret = dmaengine_slave_config(buf->chan, cfg);
+	if (ret)
+		dev_warn(dev, "DMA slave_config for %s returned %d\n",
+			name, ret);
+
+	buf->dev = buf->chan->device->dev;
+
+	return 0;
+}
+
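+/*
+ * Prepare, submit and issue a single-entry scatterlist transfer on
+ * the buffer's channel.  DMA_PREP_INTERRUPT requests the completion
+ * callback; DMA_CTRL_ACK lets the dmaengine core recycle the
+ * descriptor once it completes.  If descriptor preparation fails,
+ * the transfer is quietly not started.
+ */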
+static void sa1100_irda_dma_start(struct sa1100_buf *buf,
+	enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *chan = buf->chan;
+
+	desc = dmaengine_prep_slave_sg(chan, &buf->sg, 1, dir,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (desc) {
+		desc->callback = cb;
+		desc->callback_param = cb_p;
+		buf->cookie = dmaengine_submit(desc);
+		dma_async_issue_pending(chan);
+	}
+}
+