dma: shdma: convert to the shdma base library
author Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Wed, 9 May 2012 15:09:21 +0000 (17:09 +0200)
committer Vinod Koul <vinod.koul@linux.intel.com>
Fri, 13 Jul 2012 03:43:08 +0000 (09:13 +0530)
The shdma base library was originally extracted from the shdma driver,
which can now be converted to actually use it.
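
The conversion follows one pattern throughout: the driver structures embed
the corresponding shdma-base objects, and each shdma_ops callback recovers
the driver context with container_of(). A minimal sketch of the pattern,
using only names taken from this patch (remaining struct fields omitted):

	struct sh_dmae_chan {
		struct shdma_chan shdma_chan;	/* embedded base-library channel */
		u32 __iomem *base;		/* channel register window */
	};

	/* callbacks receive the generic channel and map it back */
	static void sh_dmae_halt(struct shdma_chan *schan)
	{
		struct sh_dmae_chan *sh_chan =
			container_of(schan, struct sh_dmae_chan, shdma_chan);

		dmae_halt(sh_chan);
	}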

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
drivers/dma/sh/shdma.c
drivers/dma/sh/shdma.h
include/linux/sh_dma.h

drivers/dma/sh/shdma.c
index 8ab4a1f..c393b35 100644
@@ -3,6 +3,7 @@
  *
  * base is drivers/dma/fsldma.c
  *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
  * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 #include "../dmaengine.h"
 #include "shdma.h"
 
-/* DMA descriptor control */
-enum sh_dmae_desc_status {
-       DESC_IDLE,
-       DESC_PREPARED,
-       DESC_SUBMITTED,
-       DESC_COMPLETED, /* completed, have to call callback */
-       DESC_WAITING,   /* callback called, waiting for ack / re-submit */
-};
+#define SH_DMAE_DRV_NAME "sh-dma-engine"
 
-#define NR_DESCS_PER_CHANNEL 32
 /* Default MEMCPY transfer size = 2^2 = 4 bytes */
 #define LOG2_DEFAULT_XFER_SIZE 2
+#define SH_DMA_SLAVE_NUMBER 256
+#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
 
 /*
  * Used for write-side mutual exclusion for the global device list,
@@ -54,18 +49,12 @@ enum sh_dmae_desc_status {
 static DEFINE_SPINLOCK(sh_dmae_lock);
 static LIST_HEAD(sh_dmae_devices);
 
-/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
-static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
-
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
 static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
 {
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
 
        __raw_writel(data, shdev->chan_reg +
-                    shdev->pdata->channel[sh_dc->id].chclr_offset);
+                    shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
 }
 
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
@@ -155,11 +144,11 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
        spin_unlock_irqrestore(&sh_dmae_lock, flags);
 
        if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
-               dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
+               dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
                return -EIO;
        }
        if (shdev->pdata->dmaor_init & ~dmaor)
-               dev_warn(shdev->common.dev,
+               dev_warn(shdev->shdma_dev.dma_dev.dev,
                         "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
                         dmaor, shdev->pdata->dmaor_init);
        return 0;
@@ -224,15 +213,6 @@ static void dmae_start(struct sh_dmae_chan *sh_chan)
        chcr_write(sh_chan, chcr & ~CHCR_TE);
 }
 
-static void dmae_halt(struct sh_dmae_chan *sh_chan)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-       u32 chcr = chcr_read(sh_chan);
-
-       chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
-       chcr_write(sh_chan, chcr);
-}
-
 static void dmae_init(struct sh_dmae_chan *sh_chan)
 {
        /*
@@ -261,7 +241,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        struct sh_dmae_pdata *pdata = shdev->pdata;
-       const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+       const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
        u16 __iomem *addr = shdev->dmars;
        unsigned int shift = chan_pdata->dmars_bit;
 
@@ -282,706 +262,142 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
        return 0;
 }
 
-static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
+static void sh_dmae_start_xfer(struct shdma_chan *schan,
+                              struct shdma_desc *sdesc)
 {
-       struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
-       struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
-       struct sh_dmae_slave *param = tx->chan->private;
-       dma_async_tx_callback callback = tx->callback;
-       dma_cookie_t cookie;
-       bool power_up;
-
-       spin_lock_irq(&sh_chan->desc_lock);
-
-       if (list_empty(&sh_chan->ld_queue))
-               power_up = true;
-       else
-               power_up = false;
-
-       cookie = dma_cookie_assign(tx);
-
-       /* Mark all chunks of this descriptor as submitted, move to the queue */
-       list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
-               /*
-                * All chunks are on the global ld_free, so, we have to find
-                * the end of the chain ourselves
-                */
-               if (chunk != desc && (chunk->mark == DESC_IDLE ||
-                                     chunk->async_tx.cookie > 0 ||
-                                     chunk->async_tx.cookie == -EBUSY ||
-                                     &chunk->node == &sh_chan->ld_free))
-                       break;
-               chunk->mark = DESC_SUBMITTED;
-               /* Callback goes to the last chunk */
-               chunk->async_tx.callback = NULL;
-               chunk->cookie = cookie;
-               list_move_tail(&chunk->node, &sh_chan->ld_queue);
-               last = chunk;
-       }
-
-       last->async_tx.callback = callback;
-       last->async_tx.callback_param = tx->callback_param;
-
-       dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
-               tx->cookie, &last->async_tx, sh_chan->id,
-               desc->hw.sar, desc->hw.tcr, desc->hw.dar);
-
-       if (power_up) {
-               sh_chan->pm_state = DMAE_PM_BUSY;
-
-               pm_runtime_get(sh_chan->dev);
-
-               spin_unlock_irq(&sh_chan->desc_lock);
-
-               pm_runtime_barrier(sh_chan->dev);
-
-               spin_lock_irq(&sh_chan->desc_lock);
-
-               /* Have we been reset, while waiting? */
-               if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
-                       dev_dbg(sh_chan->dev, "Bring up channel %d\n",
-                               sh_chan->id);
-                       if (param) {
-                               const struct sh_dmae_slave_config *cfg =
-                                       param->config;
-
-                               dmae_set_dmars(sh_chan, cfg->mid_rid);
-                               dmae_set_chcr(sh_chan, cfg->chcr);
-                       } else {
-                               dmae_init(sh_chan);
-                       }
-
-                       if (sh_chan->pm_state == DMAE_PM_PENDING)
-                               sh_chan_xfer_ld_queue(sh_chan);
-                       sh_chan->pm_state = DMAE_PM_ESTABLISHED;
-               }
-       } else {
-               sh_chan->pm_state = DMAE_PM_PENDING;
-       }
-
-       spin_unlock_irq(&sh_chan->desc_lock);
-
-       return cookie;
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
+       dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
+               sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
+               sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
+       /* Get the ld start address from ld_queue */
+       dmae_set_reg(sh_chan, &sh_desc->hw);
+       dmae_start(sh_chan);
 }
 
-/* Called with desc_lock held */
-static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
+static bool sh_dmae_channel_busy(struct shdma_chan *schan)
 {
-       struct sh_desc *desc;
-
-       list_for_each_entry(desc, &sh_chan->ld_free, node)
-               if (desc->mark != DESC_PREPARED) {
-                       BUG_ON(desc->mark != DESC_IDLE);
-                       list_del(&desc->node);
-                       return desc;
-               }
-
-       return NULL;
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       return dmae_is_busy(sh_chan);
 }
 
-static const struct sh_dmae_slave_config *sh_dmae_find_slave(
-       struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
+static void sh_dmae_setup_xfer(struct shdma_chan *schan,
+                              struct shdma_slave *sslave)
 {
-       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-       struct sh_dmae_pdata *pdata = shdev->pdata;
-       int i;
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
 
-       if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
-               return NULL;
-
-       for (i = 0; i < pdata->slave_num; i++)
-               if (pdata->slave[i].slave_id == param->slave_id)
-                       return pdata->slave + i;
-
-       return NULL;
-}
-
-static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
-{
-       struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-       struct sh_desc *desc;
-       struct sh_dmae_slave *param = chan->private;
-       int ret;
+       if (sslave) {
+               struct sh_dmae_slave *slave = container_of(sslave,
+                                       struct sh_dmae_slave, shdma_slave);
+               const struct sh_dmae_slave_config *cfg =
+                       slave->config;
 
-       /*
-        * This relies on the guarantee from dmaengine that alloc_chan_resources
-        * never runs concurrently with itself or free_chan_resources.
-        */
-       if (param) {
-               const struct sh_dmae_slave_config *cfg;
-
-               cfg = sh_dmae_find_slave(sh_chan, param);
-               if (!cfg) {
-                       ret = -EINVAL;
-                       goto efindslave;
-               }
-
-               if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
-                       ret = -EBUSY;
-                       goto etestused;
-               }
-
-               param->config = cfg;
-       }
-
-       while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
-               desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
-               if (!desc)
-                       break;
-               dma_async_tx_descriptor_init(&desc->async_tx,
-                                       &sh_chan->common);
-               desc->async_tx.tx_submit = sh_dmae_tx_submit;
-               desc->mark = DESC_IDLE;
-
-               list_add(&desc->node, &sh_chan->ld_free);
-               sh_chan->descs_allocated++;
-       }
-
-       if (!sh_chan->descs_allocated) {
-               ret = -ENOMEM;
-               goto edescalloc;
-       }
-
-       return sh_chan->descs_allocated;
-
-edescalloc:
-       if (param)
-               clear_bit(param->slave_id, sh_dmae_slave_used);
-etestused:
-efindslave:
-       chan->private = NULL;
-       return ret;
-}
-
-/*
- * sh_dma_free_chan_resources - Free all resources of the channel.
- */
-static void sh_dmae_free_chan_resources(struct dma_chan *chan)
-{
-       struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-       struct sh_desc *desc, *_desc;
-       LIST_HEAD(list);
-
-       /* Protect against ISR */
-       spin_lock_irq(&sh_chan->desc_lock);
-       dmae_halt(sh_chan);
-       spin_unlock_irq(&sh_chan->desc_lock);
-
-       /* Now no new interrupts will occur */
-
-       /* Prepared and not submitted descriptors can still be on the queue */
-       if (!list_empty(&sh_chan->ld_queue))
-               sh_dmae_chan_ld_cleanup(sh_chan, true);
-
-       if (chan->private) {
-               /* The caller is holding dma_list_mutex */
-               struct sh_dmae_slave *param = chan->private;
-               clear_bit(param->slave_id, sh_dmae_slave_used);
-               chan->private = NULL;
-       }
-
-       spin_lock_irq(&sh_chan->desc_lock);
-
-       list_splice_init(&sh_chan->ld_free, &list);
-       sh_chan->descs_allocated = 0;
-
-       spin_unlock_irq(&sh_chan->desc_lock);
-
-       list_for_each_entry_safe(desc, _desc, &list, node)
-               kfree(desc);
-}
-
-/**
- * sh_dmae_add_desc - get, set up and return one transfer descriptor
- * @sh_chan:   DMA channel
- * @flags:     DMA transfer flags
- * @dest:      destination DMA address, incremented when direction equals
- *             DMA_DEV_TO_MEM
- * @src:       source DMA address, incremented when direction equals
- *             DMA_MEM_TO_DEV
- * @len:       DMA transfer length
- * @first:     if NULL, set to the current descriptor and cookie set to -EBUSY
- * @direction: needed for slave DMA to decide which address to keep constant,
- *             equals DMA_MEM_TO_MEM for MEMCPY
- * Returns 0 or an error
- * Locks: called with desc_lock held
- */
-static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
-       unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
-       struct sh_desc **first, enum dma_transfer_direction direction)
-{
-       struct sh_desc *new;
-       size_t copy_size;
-
-       if (!*len)
-               return NULL;
-
-       /* Allocate the link descriptor from the free list */
-       new = sh_dmae_get_desc(sh_chan);
-       if (!new) {
-               dev_err(sh_chan->dev, "No free link descriptor available\n");
-               return NULL;
-       }
-
-       copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
-
-       new->hw.sar = *src;
-       new->hw.dar = *dest;
-       new->hw.tcr = copy_size;
-
-       if (!*first) {
-               /* First desc */
-               new->async_tx.cookie = -EBUSY;
-               *first = new;
+               dmae_set_dmars(sh_chan, cfg->mid_rid);
+               dmae_set_chcr(sh_chan, cfg->chcr);
        } else {
-               /* Other desc - invisible to the user */
-               new->async_tx.cookie = -EINVAL;
+               dmae_init(sh_chan);
        }
-
-       dev_dbg(sh_chan->dev,
-               "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
-               copy_size, *len, *src, *dest, &new->async_tx,
-               new->async_tx.cookie, sh_chan->xmit_shift);
-
-       new->mark = DESC_PREPARED;
-       new->async_tx.flags = flags;
-       new->direction = direction;
-
-       *len -= copy_size;
-       if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
-               *src += copy_size;
-       if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
-               *dest += copy_size;
-
-       return new;
 }
 
-/*
- * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
- *
- * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
- * converted to scatter-gather to guarantee consistent locking and a correct
- * list manipulation. For slave DMA direction carries the usual meaning, and,
- * logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
- * and the SG list contains only one element and points at the source buffer.
- */
-static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
-       struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
-       enum dma_transfer_direction direction, unsigned long flags)
+static const struct sh_dmae_slave_config *dmae_find_slave(
+       struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *slave)
 {
-       struct scatterlist *sg;
-       struct sh_desc *first = NULL, *new = NULL /* compiler... */;
-       LIST_HEAD(tx_list);
-       int chunks = 0;
-       unsigned long irq_flags;
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       struct sh_dmae_pdata *pdata = shdev->pdata;
+       const struct sh_dmae_slave_config *cfg;
        int i;
 
-       if (!sg_len)
+       if (slave->shdma_slave.slave_id >= SH_DMA_SLAVE_NUMBER)
                return NULL;
 
-       for_each_sg(sgl, sg, sg_len, i)
-               chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
-                       (SH_DMA_TCR_MAX + 1);
-
-       /* Have to lock the whole loop to protect against concurrent release */
-       spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
-
-       /*
-        * Chaining:
-        * first descriptor is what user is dealing with in all API calls, its
-        *      cookie is at first set to -EBUSY, at tx-submit to a positive
-        *      number
-        * if more than one chunk is needed further chunks have cookie = -EINVAL
-        * the last chunk, if not equal to the first, has cookie = -ENOSPC
-        * all chunks are linked onto the tx_list head with their .node heads
-        *      only during this function, then they are immediately spliced
-        *      back onto the free list in form of a chain
-        */
-       for_each_sg(sgl, sg, sg_len, i) {
-               dma_addr_t sg_addr = sg_dma_address(sg);
-               size_t len = sg_dma_len(sg);
-
-               if (!len)
-                       goto err_get_desc;
-
-               do {
-                       dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
-                               i, sg, len, (unsigned long long)sg_addr);
-
-                       if (direction == DMA_DEV_TO_MEM)
-                               new = sh_dmae_add_desc(sh_chan, flags,
-                                               &sg_addr, addr, &len, &first,
-                                               direction);
-                       else
-                               new = sh_dmae_add_desc(sh_chan, flags,
-                                               addr, &sg_addr, &len, &first,
-                                               direction);
-                       if (!new)
-                               goto err_get_desc;
-
-                       new->chunks = chunks--;
-                       list_add_tail(&new->node, &tx_list);
-               } while (len);
-       }
-
-       if (new != first)
-               new->async_tx.cookie = -ENOSPC;
-
-       /* Put them back on the free list, so, they don't get lost */
-       list_splice_tail(&tx_list, &sh_chan->ld_free);
-
-       spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
-
-       return &first->async_tx;
-
-err_get_desc:
-       list_for_each_entry(new, &tx_list, node)
-               new->mark = DESC_IDLE;
-       list_splice(&tx_list, &sh_chan->ld_free);
-
-       spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
+       for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+               if (cfg->slave_id == slave->shdma_slave.slave_id)
+                       return cfg;
 
        return NULL;
 }
 
-static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
-       struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
-       size_t len, unsigned long flags)
+static int sh_dmae_set_slave(struct shdma_chan *schan,
+                            struct shdma_slave *sslave)
 {
-       struct sh_dmae_chan *sh_chan;
-       struct scatterlist sg;
-
-       if (!chan || !len)
-               return NULL;
-
-       sh_chan = to_sh_chan(chan);
-
-       sg_init_table(&sg, 1);
-       sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
-                   offset_in_page(dma_src));
-       sg_dma_address(&sg) = dma_src;
-       sg_dma_len(&sg) = len;
-
-       return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
-                              flags);
-}
-
-static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
-       struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_transfer_direction direction, unsigned long flags,
-       void *context)
-{
-       struct sh_dmae_slave *param;
-       struct sh_dmae_chan *sh_chan;
-       dma_addr_t slave_addr;
-
-       if (!chan)
-               return NULL;
-
-       sh_chan = to_sh_chan(chan);
-       param = chan->private;
-
-       /* Someone calling slave DMA on a public channel? */
-       if (!param || !sg_len) {
-               dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
-                        __func__, param, sg_len, param ? param->slave_id : -1);
-               return NULL;
-       }
-
-       slave_addr = param->config->addr;
-
-       /*
-        * if (param != NULL), this is a successfully requested slave channel,
-        * therefore param->config != NULL too.
-        */
-       return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
-                              direction, flags);
-}
-
-static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                          unsigned long arg)
-{
-       struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-       unsigned long flags;
-
-       /* Only supports DMA_TERMINATE_ALL */
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENXIO;
-
-       if (!chan)
-               return -EINVAL;
-
-       spin_lock_irqsave(&sh_chan->desc_lock, flags);
-       dmae_halt(sh_chan);
-
-       if (!list_empty(&sh_chan->ld_queue)) {
-               /* Record partial transfer */
-               struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
-                                                 struct sh_desc, node);
-               desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
-                       sh_chan->xmit_shift;
-       }
-       spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       struct sh_dmae_slave *slave = container_of(sslave, struct sh_dmae_slave,
+                                                  shdma_slave);
+       const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave);
+       if (!cfg)
+               return -ENODEV;
 
-       sh_dmae_chan_ld_cleanup(sh_chan, true);
+       slave->config = cfg;
 
        return 0;
 }
 
-static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
+static void dmae_halt(struct sh_dmae_chan *sh_chan)
 {
-       struct sh_desc *desc, *_desc;
-       /* Is the "exposed" head of a chain acked? */
-       bool head_acked = false;
-       dma_cookie_t cookie = 0;
-       dma_async_tx_callback callback = NULL;
-       void *param = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&sh_chan->desc_lock, flags);
-       list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
-               struct dma_async_tx_descriptor *tx = &desc->async_tx;
-
-               BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
-               BUG_ON(desc->mark != DESC_SUBMITTED &&
-                      desc->mark != DESC_COMPLETED &&
-                      desc->mark != DESC_WAITING);
-
-               /*
-                * queue is ordered, and we use this loop to (1) clean up all
-                * completed descriptors, and to (2) update descriptor flags of
-                * any chunks in a (partially) completed chain
-                */
-               if (!all && desc->mark == DESC_SUBMITTED &&
-                   desc->cookie != cookie)
-                       break;
-
-               if (tx->cookie > 0)
-                       cookie = tx->cookie;
-
-               if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
-                       if (sh_chan->common.completed_cookie != desc->cookie - 1)
-                               dev_dbg(sh_chan->dev,
-                                       "Completing cookie %d, expected %d\n",
-                                       desc->cookie,
-                                       sh_chan->common.completed_cookie + 1);
-                       sh_chan->common.completed_cookie = desc->cookie;
-               }
-
-               /* Call callback on the last chunk */
-               if (desc->mark == DESC_COMPLETED && tx->callback) {
-                       desc->mark = DESC_WAITING;
-                       callback = tx->callback;
-                       param = tx->callback_param;
-                       dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
-                               tx->cookie, tx, sh_chan->id);
-                       BUG_ON(desc->chunks != 1);
-                       break;
-               }
-
-               if (tx->cookie > 0 || tx->cookie == -EBUSY) {
-                       if (desc->mark == DESC_COMPLETED) {
-                               BUG_ON(tx->cookie < 0);
-                               desc->mark = DESC_WAITING;
-                       }
-                       head_acked = async_tx_test_ack(tx);
-               } else {
-                       switch (desc->mark) {
-                       case DESC_COMPLETED:
-                               desc->mark = DESC_WAITING;
-                               /* Fall through */
-                       case DESC_WAITING:
-                               if (head_acked)
-                                       async_tx_ack(&desc->async_tx);
-                       }
-               }
-
-               dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
-                       tx, tx->cookie);
-
-               if (((desc->mark == DESC_COMPLETED ||
-                     desc->mark == DESC_WAITING) &&
-                    async_tx_test_ack(&desc->async_tx)) || all) {
-                       /* Remove from ld_queue list */
-                       desc->mark = DESC_IDLE;
-
-                       list_move(&desc->node, &sh_chan->ld_free);
-
-                       if (list_empty(&sh_chan->ld_queue)) {
-                               dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
-                               pm_runtime_put(sh_chan->dev);
-                       }
-               }
-       }
-
-       if (all && !callback)
-               /*
-                * Terminating and the loop completed normally: forgive
-                * uncompleted cookies
-                */
-               sh_chan->common.completed_cookie = sh_chan->common.cookie;
-
-       spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
-
-       if (callback)
-               callback(param);
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       u32 chcr = chcr_read(sh_chan);
 
-       return callback;
+       chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+       chcr_write(sh_chan, chcr);
 }
 
-/*
- * sh_chan_ld_cleanup - Clean up link descriptors
- *
- * This function cleans up the ld_queue of DMA channel.
- */
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
+static int sh_dmae_desc_setup(struct shdma_chan *schan,
+                             struct shdma_desc *sdesc,
+                             dma_addr_t src, dma_addr_t dst, size_t *len)
 {
-       while (__ld_cleanup(sh_chan, all))
-               ;
-}
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
 
-/* Called under spin_lock_irq(&sh_chan->desc_lock) */
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
-{
-       struct sh_desc *desc;
+       if (*len > schan->max_xfer_len)
+               *len = schan->max_xfer_len;
 
-       /* DMA work check */
-       if (dmae_is_busy(sh_chan))
-               return;
-
-       /* Find the first not transferred descriptor */
-       list_for_each_entry(desc, &sh_chan->ld_queue, node)
-               if (desc->mark == DESC_SUBMITTED) {
-                       dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
-                               desc->async_tx.cookie, sh_chan->id,
-                               desc->hw.tcr, desc->hw.sar, desc->hw.dar);
-                       /* Get the ld start address from ld_queue */
-                       dmae_set_reg(sh_chan, &desc->hw);
-                       dmae_start(sh_chan);
-                       break;
-               }
-}
+       sh_desc->hw.sar = src;
+       sh_desc->hw.dar = dst;
+       sh_desc->hw.tcr = *len;
 
-static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
-{
-       struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-
-       spin_lock_irq(&sh_chan->desc_lock);
-       if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
-               sh_chan_xfer_ld_queue(sh_chan);
-       else
-               sh_chan->pm_state = DMAE_PM_PENDING;
-       spin_unlock_irq(&sh_chan->desc_lock);
+       return 0;
 }
 
-static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
-                                       dma_cookie_t cookie,
-                                       struct dma_tx_state *txstate)
+static void sh_dmae_halt(struct shdma_chan *schan)
 {
-       struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-       enum dma_status status;
-       unsigned long flags;
-
-       sh_dmae_chan_ld_cleanup(sh_chan, false);
-
-       spin_lock_irqsave(&sh_chan->desc_lock, flags);
-
-       status = dma_cookie_status(chan, cookie, txstate);
-
-       /*
-        * If we don't find cookie on the queue, it has been aborted and we have
-        * to report error
-        */
-       if (status != DMA_SUCCESS) {
-               struct sh_desc *desc;
-               status = DMA_ERROR;
-               list_for_each_entry(desc, &sh_chan->ld_queue, node)
-                       if (desc->cookie == cookie) {
-                               status = DMA_IN_PROGRESS;
-                               break;
-                       }
-       }
-
-       spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
-
-       return status;
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       dmae_halt(sh_chan);
 }
 
-static irqreturn_t sh_dmae_interrupt(int irq, void *data)
+static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
 {
-       irqreturn_t ret = IRQ_NONE;
-       struct sh_dmae_chan *sh_chan = data;
-       u32 chcr;
-
-       spin_lock(&sh_chan->desc_lock);
-
-       chcr = chcr_read(sh_chan);
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
 
-       if (chcr & CHCR_TE) {
-               /* DMA stop */
-               dmae_halt(sh_chan);
-
-               ret = IRQ_HANDLED;
-               tasklet_schedule(&sh_chan->tasklet);
-       }
+       if (!(chcr_read(sh_chan) & CHCR_TE))
+               return false;
 
-       spin_unlock(&sh_chan->desc_lock);
+       /* DMA stop */
+       dmae_halt(sh_chan);
 
-       return ret;
+       return true;
 }
 
 /* Called from error IRQ or NMI */
 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
-       unsigned int handled = 0;
-       int i;
+       bool ret;
 
        /* halt the dma controller */
        sh_dmae_ctl_stop(shdev);
 
        /* We cannot detect which channel caused the error, so have to reset all */
-       for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
-               struct sh_dmae_chan *sh_chan = shdev->chan[i];
-               struct sh_desc *desc;
-               LIST_HEAD(dl);
-
-               if (!sh_chan)
-                       continue;
-
-               spin_lock(&sh_chan->desc_lock);
-
-               /* Stop the channel */
-               dmae_halt(sh_chan);
-
-               list_splice_init(&sh_chan->ld_queue, &dl);
-
-               if (!list_empty(&dl)) {
-                       dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
-                       pm_runtime_put(sh_chan->dev);
-               }
-               sh_chan->pm_state = DMAE_PM_ESTABLISHED;
-
-               spin_unlock(&sh_chan->desc_lock);
-
-               /* Complete all  */
-               list_for_each_entry(desc, &dl, node) {
-                       struct dma_async_tx_descriptor *tx = &desc->async_tx;
-                       desc->mark = DESC_IDLE;
-                       if (tx->callback)
-                               tx->callback(tx->callback_param);
-               }
-
-               spin_lock(&sh_chan->desc_lock);
-               list_splice(&dl, &sh_chan->ld_free);
-               spin_unlock(&sh_chan->desc_lock);
-
-               handled++;
-       }
+       ret = shdma_reset(&shdev->shdma_dev);
 
        sh_dmae_rst(shdev);
 
-       return !!handled;
+       return ret;
 }
 
 static irqreturn_t sh_dmae_err(int irq, void *data)
@@ -991,35 +407,24 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
        if (!(dmaor_read(shdev) & DMAOR_AE))
                return IRQ_NONE;
 
-       sh_dmae_reset(data);
+       sh_dmae_reset(shdev);
        return IRQ_HANDLED;
 }
 
-static void dmae_do_tasklet(unsigned long data)
+static bool sh_dmae_desc_completed(struct shdma_chan *schan,
+                                  struct shdma_desc *sdesc)
 {
-       struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-       struct sh_desc *desc;
+       struct sh_dmae_chan *sh_chan = container_of(schan,
+                                       struct sh_dmae_chan, shdma_chan);
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
        u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
        u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
-       spin_lock_irq(&sh_chan->desc_lock);
-       list_for_each_entry(desc, &sh_chan->ld_queue, node) {
-               if (desc->mark == DESC_SUBMITTED &&
-                   ((desc->direction == DMA_DEV_TO_MEM &&
-                     (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
-                    (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
-                       dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
-                               desc->async_tx.cookie, &desc->async_tx,
-                               desc->hw.dar);
-                       desc->mark = DESC_COMPLETED;
-                       break;
-               }
-       }
-       /* Next desc */
-       sh_chan_xfer_ld_queue(sh_chan);
-       spin_unlock_irq(&sh_chan->desc_lock);
-
-       sh_dmae_chan_ld_cleanup(sh_chan, false);
+       return  (sdesc->direction == DMA_DEV_TO_MEM &&
+                (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
+               (sdesc->direction != DMA_DEV_TO_MEM &&
+                (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
 }
 
 static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
@@ -1073,97 +478,174 @@ static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
 static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
                                        int irq, unsigned long flags)
 {
-       int err;
        const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
-       struct platform_device *pdev = to_platform_device(shdev->common.dev);
-       struct sh_dmae_chan *new_sh_chan;
+       struct shdma_dev *sdev = &shdev->shdma_dev;
+       struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+       struct sh_dmae_chan *sh_chan;
+       struct shdma_chan *schan;
+       int err;
 
-       /* alloc channel */
-       new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
-       if (!new_sh_chan) {
-               dev_err(shdev->common.dev,
+       sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+       if (!sh_chan) {
+               dev_err(sdev->dma_dev.dev,
                        "No free memory for allocating dma channels!\n");
                return -ENOMEM;
        }
 
-       new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
-
-       /* reference struct dma_device */
-       new_sh_chan->common.device = &shdev->common;
-       dma_cookie_init(&new_sh_chan->common);
+       schan = &sh_chan->shdma_chan;
+       schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
 
-       new_sh_chan->dev = shdev->common.dev;
-       new_sh_chan->id = id;
-       new_sh_chan->irq = irq;
-       new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+       shdma_chan_probe(sdev, schan, id);
 
-       /* Init DMA tasklet */
-       tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
-                       (unsigned long)new_sh_chan);
-
-       spin_lock_init(&new_sh_chan->desc_lock);
-
-       /* Init descripter manage list */
-       INIT_LIST_HEAD(&new_sh_chan->ld_queue);
-       INIT_LIST_HEAD(&new_sh_chan->ld_free);
-
-       /* Add the channel to DMA device channel list */
-       list_add_tail(&new_sh_chan->common.device_node,
-                       &shdev->common.channels);
-       shdev->common.chancnt++;
+       sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
 
+       /* set up channel irq */
        if (pdev->id >= 0)
-               snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
-                        "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
+               snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+                        "sh-dmae%d.%d", pdev->id, id);
        else
-               snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
-                        "sh-dma%d", new_sh_chan->id);
+               snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+                        "sh-dma%d", id);
 
-       /* set up channel irq */
-       err = request_irq(irq, &sh_dmae_interrupt, flags,
-                         new_sh_chan->dev_id, new_sh_chan);
+       err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
        if (err) {
-               dev_err(shdev->common.dev, "DMA channel %d request_irq error "
-                       "with return %d\n", id, err);
+               dev_err(sdev->dma_dev.dev,
+                       "DMA channel %d request_irq error %d\n",
+                       id, err);
                goto err_no_irq;
        }
 
-       shdev->chan[id] = new_sh_chan;
+       shdev->chan[id] = sh_chan;
        return 0;
 
 err_no_irq:
        /* remove from dmaengine device node */
-       list_del(&new_sh_chan->common.device_node);
-       kfree(new_sh_chan);
+       shdma_chan_remove(schan);
+       kfree(sh_chan);
        return err;
 }
 
 static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
 {
+       struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+       struct shdma_chan *schan;
        int i;
 
-       for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
-               if (shdev->chan[i]) {
-                       struct sh_dmae_chan *sh_chan = shdev->chan[i];
+       shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
+               struct sh_dmae_chan *sh_chan = container_of(schan,
+                                       struct sh_dmae_chan, shdma_chan);
+               BUG_ON(!schan);
 
-                       free_irq(sh_chan->irq, sh_chan);
+               shdma_free_irq(&sh_chan->shdma_chan);
 
-                       list_del(&sh_chan->common.device_node);
-                       kfree(sh_chan);
-                       shdev->chan[i] = NULL;
+               shdma_chan_remove(schan);
+               kfree(sh_chan);
+       }
+       dma_dev->chancnt = 0;
+}
+
+static void sh_dmae_shutdown(struct platform_device *pdev)
+{
+       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+       sh_dmae_ctl_stop(shdev);
+}
+
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+       return sh_dmae_rst(shdev);
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+       int i, ret;
+
+       ret = sh_dmae_rst(shdev);
+       if (ret < 0)
+               dev_err(dev, "Failed to reset!\n");
+
+       for (i = 0; i < shdev->pdata->channel_num; i++) {
+               struct sh_dmae_chan *sh_chan = shdev->chan[i];
+               struct sh_dmae_slave *param = sh_chan->shdma_chan.dma_chan.private;
+
+               if (!sh_chan->shdma_chan.desc_num)
+                       continue;
+
+               if (param) {
+                       const struct sh_dmae_slave_config *cfg = param->config;
+                       dmae_set_dmars(sh_chan, cfg->mid_rid);
+                       dmae_set_chcr(sh_chan, cfg->chcr);
+               } else {
+                       dmae_init(sh_chan);
                }
        }
-       shdev->common.chancnt = 0;
+
+       return 0;
 }
+#else
+#define sh_dmae_suspend NULL
+#define sh_dmae_resume NULL
+#endif
 
-static int __init sh_dmae_probe(struct platform_device *pdev)
+const struct dev_pm_ops sh_dmae_pm = {
+       .suspend                = sh_dmae_suspend,
+       .resume                 = sh_dmae_resume,
+       .runtime_suspend        = sh_dmae_runtime_suspend,
+       .runtime_resume         = sh_dmae_runtime_resume,
+};
+
+static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
+{
+       struct sh_dmae_slave *param = schan->dma_chan.private;
+
+       /*
+        * Implicit BUG_ON(!param)
+        * if (param != NULL), this is a successfully requested slave channel,
+        * therefore param->config != NULL too.
+        */
+       return param->config->addr;
+}
+
+static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
+{
+       return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sh_dmae_shdma_ops = {
+       .desc_completed = sh_dmae_desc_completed,
+       .halt_channel = sh_dmae_halt,
+       .channel_busy = sh_dmae_channel_busy,
+       .slave_addr = sh_dmae_slave_addr,
+       .desc_setup = sh_dmae_desc_setup,
+       .set_slave = sh_dmae_set_slave,
+       .setup_xfer = sh_dmae_setup_xfer,
+       .start_xfer = sh_dmae_start_xfer,
+       .embedded_desc = sh_dmae_embedded_desc,
+       .chan_irq = sh_dmae_chan_irq,
+};
+
+static int __devinit sh_dmae_probe(struct platform_device *pdev)
 {
        struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
        unsigned long irqflags = IRQF_DISABLED,
-               chan_flag[SH_DMAC_MAX_CHANNELS] = {};
-       int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
+               chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+       int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
        int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
        struct sh_dmae_device *shdev;
+       struct dma_device *dma_dev;
        struct resource *chan, *dmars, *errirq_res, *chanirq_res;
 
        /* get platform data */
@@ -1211,6 +693,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
                goto ealloc;
        }
 
+       dma_dev = &shdev->shdma_dev.dma_dev;
+
        shdev->chan_reg = ioremap(chan->start, resource_size(chan));
        if (!shdev->chan_reg)
                goto emapchan;
@@ -1220,8 +704,23 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
                        goto emapdmars;
        }
 
+       if (!pdata->slave_only)
+               dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+       if (pdata->slave && pdata->slave_num)
+               dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+       /* Default transfer size of 32 bytes requires 32-byte alignment */
+       dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+
+       shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
+       shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
+       err = shdma_init(&pdev->dev, &shdev->shdma_dev,
+                             pdata->channel_num);
+       if (err < 0)
+               goto eshdma;
+
        /* platform data */
-       shdev->pdata = pdata;
+       shdev->pdata = pdev->dev.platform_data;
 
        if (pdata->chcr_offset)
                shdev->chcr_offset = pdata->chcr_offset;
@@ -1235,10 +734,10 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, shdev);
 
-       shdev->common.dev = &pdev->dev;
-
        pm_runtime_enable(&pdev->dev);
-       pm_runtime_get_sync(&pdev->dev);
+       err = pm_runtime_get_sync(&pdev->dev);
+       if (err < 0)
+               dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
 
        spin_lock_irq(&sh_dmae_lock);
        list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
@@ -1249,27 +748,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
        if (err)
                goto rst_err;
 
-       INIT_LIST_HEAD(&shdev->common.channels);
-
-       if (!pdata->slave_only)
-               dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
-       if (pdata->slave && pdata->slave_num)
-               dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
-
-       shdev->common.device_alloc_chan_resources
-               = sh_dmae_alloc_chan_resources;
-       shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
-       shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
-       shdev->common.device_tx_status = sh_dmae_tx_status;
-       shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
-
-       /* Compulsory for DMA_SLAVE fields */
-       shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
-       shdev->common.device_control = sh_dmae_control;
-
-       /* Default transfer size of 32 bytes requires 32-byte alignment */
-       shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
-
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
 
@@ -1301,7 +779,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
            !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
                /* Special case - all multiplexed */
                for (; irq_cnt < pdata->channel_num; irq_cnt++) {
-                       if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
+                       if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
                                chan_irq[irq_cnt] = chanirq_res->start;
                                chan_flag[irq_cnt] = IRQF_SHARED;
                        } else {
@@ -1312,7 +790,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
        } else {
                do {
                        for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
-                               if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
+                               if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
                                        irq_cap = 1;
                                        break;
                                }
@@ -1328,7 +806,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
                                chan_irq[irq_cnt++] = i;
                        }
 
-                       if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
+                       if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
                                break;
 
                        chanirq_res = platform_get_resource(pdev,
@@ -1346,14 +824,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
        if (irq_cap)
                dev_notice(&pdev->dev, "Attempting to register %d DMA "
                           "channels when a maximum of %d are supported.\n",
-                          pdata->channel_num, SH_DMAC_MAX_CHANNELS);
+                          pdata->channel_num, SH_DMAE_MAX_CHANNELS);
 
        pm_runtime_put(&pdev->dev);
 
-       dma_async_device_register(&shdev->common);
+       err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
+       if (err < 0)
+               goto edmadevreg;
 
        return err;
 
+edmadevreg:
+       pm_runtime_get(&pdev->dev);
+
 chan_probe_err:
        sh_dmae_chan_remove(shdev);
 
@@ -1369,10 +852,11 @@ rst_err:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
+       platform_set_drvdata(pdev, NULL);
+       shdma_cleanup(&shdev->shdma_dev);
+eshdma:
        if (dmars)
                iounmap(shdev->dmars);
-
-       platform_set_drvdata(pdev, NULL);
 emapdmars:
        iounmap(shdev->chan_reg);
        synchronize_rcu();
@@ -1387,13 +871,14 @@ ermrdmars:
        return err;
 }
 
-static int __exit sh_dmae_remove(struct platform_device *pdev)
+static int __devexit sh_dmae_remove(struct platform_device *pdev)
 {
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+       struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
        struct resource *res;
        int errirq = platform_get_irq(pdev, 0);
 
-       dma_async_device_unregister(&shdev->common);
+       dma_async_device_unregister(dma_dev);
 
        if (errirq > 0)
                free_irq(errirq, shdev);
@@ -1402,11 +887,11 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);
 
-       /* channel data remove */
-       sh_dmae_chan_remove(shdev);
-
        pm_runtime_disable(&pdev->dev);
 
+       sh_dmae_chan_remove(shdev);
+       shdma_cleanup(&shdev->shdma_dev);
+
        if (shdev->dmars)
                iounmap(shdev->dmars);
        iounmap(shdev->chan_reg);
@@ -1426,77 +911,14 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
        return 0;
 }
 
-static void sh_dmae_shutdown(struct platform_device *pdev)
-{
-       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
-       sh_dmae_ctl_stop(shdev);
-}
-
-static int sh_dmae_runtime_suspend(struct device *dev)
-{
-       return 0;
-}
-
-static int sh_dmae_runtime_resume(struct device *dev)
-{
-       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-
-       return sh_dmae_rst(shdev);
-}
-
-#ifdef CONFIG_PM
-static int sh_dmae_suspend(struct device *dev)
-{
-       return 0;
-}
-
-static int sh_dmae_resume(struct device *dev)
-{
-       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-       int i, ret;
-
-       ret = sh_dmae_rst(shdev);
-       if (ret < 0)
-               dev_err(dev, "Failed to reset!\n");
-
-       for (i = 0; i < shdev->pdata->channel_num; i++) {
-               struct sh_dmae_chan *sh_chan = shdev->chan[i];
-               struct sh_dmae_slave *param = sh_chan->common.private;
-
-               if (!sh_chan->descs_allocated)
-                       continue;
-
-               if (param) {
-                       const struct sh_dmae_slave_config *cfg = param->config;
-                       dmae_set_dmars(sh_chan, cfg->mid_rid);
-                       dmae_set_chcr(sh_chan, cfg->chcr);
-               } else {
-                       dmae_init(sh_chan);
-               }
-       }
-
-       return 0;
-}
-#else
-#define sh_dmae_suspend NULL
-#define sh_dmae_resume NULL
-#endif
-
-const struct dev_pm_ops sh_dmae_pm = {
-       .suspend                = sh_dmae_suspend,
-       .resume                 = sh_dmae_resume,
-       .runtime_suspend        = sh_dmae_runtime_suspend,
-       .runtime_resume         = sh_dmae_runtime_resume,
-};
-
 static struct platform_driver sh_dmae_driver = {
-       .remove         = __exit_p(sh_dmae_remove),
-       .shutdown       = sh_dmae_shutdown,
-       .driver = {
+       .driver         = {
                .owner  = THIS_MODULE,
-               .name   = "sh-dma-engine",
                .pm     = &sh_dmae_pm,
+               .name   = SH_DMAE_DRV_NAME,
        },
+       .remove         = __devexit_p(sh_dmae_remove),
+       .shutdown       = sh_dmae_shutdown,
 };
 
 static int __init sh_dmae_init(void)
@@ -1521,4 +943,4 @@ module_exit(sh_dmae_exit);
 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:sh-dma-engine");
+MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
drivers/dma/sh/shdma.h
index 0b1d2c1..840e47d 100644
@@ -13,42 +13,27 @@
 #ifndef __DMA_SHDMA_H
 #define __DMA_SHDMA_H
 
+#include <linux/shdma-base.h>
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
 
-#define SH_DMAC_MAX_CHANNELS 20
-#define SH_DMA_SLAVE_NUMBER 256
-#define SH_DMA_TCR_MAX 0x00FFFFFF      /* 16MB */
+#define SH_DMAE_MAX_CHANNELS 20
+#define SH_DMAE_TCR_MAX 0x00FFFFFF     /* 16MB */
 
 struct device;
 
-enum dmae_pm_state {
-       DMAE_PM_ESTABLISHED,
-       DMAE_PM_BUSY,
-       DMAE_PM_PENDING,
-};
-
 struct sh_dmae_chan {
-       spinlock_t desc_lock;           /* Descriptor operation lock */
-       struct list_head ld_queue;      /* Link descriptors queue */
-       struct list_head ld_free;       /* Link descriptors free */
-       struct dma_chan common;         /* DMA common channel */
-       struct device *dev;             /* Channel device */
-       struct tasklet_struct tasklet;  /* Tasklet */
-       int descs_allocated;            /* desc count */
+       struct shdma_chan shdma_chan;
        int xmit_shift;                 /* log_2(bytes_per_xfer) */
-       int irq;
-       int id;                         /* Raw id of this channel */
        u32 __iomem *base;
        char dev_id[16];                /* unique name per DMAC of channel */
        int pm_error;
-       enum dmae_pm_state pm_state;
 };
 
 struct sh_dmae_device {
-       struct dma_device common;
-       struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
+       struct shdma_dev shdma_dev;
+       struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
        struct sh_dmae_pdata *pdata;
        struct list_head node;
        u32 __iomem *chan_reg;
@@ -57,10 +42,21 @@ struct sh_dmae_device {
        u32 chcr_ie_bit;
 };
 
-#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
+struct sh_dmae_regs {
+       u32 sar; /* SAR / source address */
+       u32 dar; /* DAR / destination address */
+       u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_dmae_desc {
+       struct sh_dmae_regs hw;
+       struct shdma_desc shdma_desc;
+};
+
+#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
 #define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
-#define to_sh_dev(chan) container_of(chan->common.device,\
-                                    struct sh_dmae_device, common)
+#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
+                                    struct sh_dmae_device, shdma_dev.dma_dev)
 
 #endif /* __DMA_SHDMA_H */
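
As a reading aid for the macros above, a hedged sketch of the pointer chain
they traverse; the helper itself is hypothetical, but the member names come
from this patch:

	/* dma_chan -> shdma_chan -> sh_dmae_chan -> sh_dmae_device */
	static inline struct sh_dmae_device *dchan_to_sh_dev(struct dma_chan *dchan)
	{
		struct shdma_chan *schan =
			container_of(dchan, struct shdma_chan, dma_chan);
		struct sh_dmae_chan *sh_chan = to_sh_chan(schan);

		return to_sh_dev(sh_chan);
	}
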
include/linux/sh_dma.h
index e081e8e..7c8ca41 100644
 #include <linux/dmaengine.h>
 #include <linux/list.h>
 #include <linux/shdma-base.h>
+#include <linux/types.h>
+
+struct device;
 
 /* Used by slave DMA clients to request DMA to/from a specific peripheral */
 struct sh_dmae_slave {
-       union {
-               unsigned int            slave_id; /* Set by the platform */
-               struct shdma_slave      shdma_slave;
-       };
-       struct device                   *dma_dev; /* Set by the platform */
-       const struct sh_dmae_slave_config       *config;  /* Set by the driver */
-};
-
-struct sh_dmae_regs {
-       u32 sar; /* SAR / source address */
-       u32 dar; /* DAR / destination address */
-       u32 tcr; /* TCR / transfer count */
-};
-
-struct sh_desc {
-       struct sh_dmae_regs hw;
-       struct list_head node;
-       struct dma_async_tx_descriptor async_tx;
-       enum dma_transfer_direction direction;
-       dma_cookie_t cookie;
-       size_t partial;
-       int chunks;
-       int mark;
+       struct shdma_slave              shdma_slave;    /* Set by the platform */
+       struct device                   *dma_dev;       /* Set by the platform */
+       const struct sh_dmae_slave_config *config;      /* Set by the driver */
 };
 
+/*
+ * Supplied by platforms to specify how a DMA channel has to be configured for
+ * a certain peripheral
+ */
 struct sh_dmae_slave_config {
        unsigned int                    slave_id;
        dma_addr_t                      addr;