(We don't have this driver in our tree.)
Signed-off-by: Darren Hart <dvhart@linux.intel.com>
---
- drivers/gpu/drm/ast/ast_drv.c | 2 +-
- drivers/gpu/drm/ast/ast_drv.h | 3 ---
- drivers/gpu/drm/ast/ast_main.c | 7 -------
- drivers/gpu/drm/cirrus/cirrus_drv.c | 2 +-
- drivers/gpu/drm/cirrus/cirrus_drv.h | 3 ---
- drivers/gpu/drm/cirrus/cirrus_main.c | 7 -------
- drivers/gpu/drm/drm_gem.c | 14 ++++++++++++++
- drivers/gpu/drm/drm_gem_cma_helper.c | 10 ----------
- drivers/gpu/drm/exynos/exynos_drm_drv.c | 2 +-
- drivers/gpu/drm/exynos/exynos_drm_gem.c | 22 ----------------------
- drivers/gpu/drm/exynos/exynos_drm_gem.h | 9 ---------
- drivers/gpu/drm/gma500/gem.c | 17 -----------------
- drivers/gpu/drm/gma500/psb_drv.c | 2 +-
- drivers/gpu/drm/gma500/psb_drv.h | 2 --
- drivers/gpu/drm/i915/i915_drv.c | 2 +-
- drivers/gpu/drm/i915/i915_drv.h | 2 --
- drivers/gpu/drm/i915/i915_gem.c | 7 -------
- drivers/gpu/drm/mgag200/mgag200_drv.c | 2 +-
- drivers/gpu/drm/mgag200/mgag200_drv.h | 3 ---
- drivers/gpu/drm/mgag200/mgag200_main.c | 7 -------
- drivers/gpu/drm/nouveau/nouveau_display.c | 7 -------
- drivers/gpu/drm/nouveau/nouveau_display.h | 2 --
- drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +-
- drivers/gpu/drm/omapdrm/omap_drv.c | 2 +-
- drivers/gpu/drm/omapdrm/omap_drv.h | 2 --
- drivers/gpu/drm/omapdrm/omap_gem.c | 15 ---------------
- drivers/gpu/drm/qxl/qxl_drv.c | 2 +-
- drivers/gpu/drm/qxl/qxl_drv.h | 3 ---
- drivers/gpu/drm/qxl/qxl_dumb.c | 7 -------
- drivers/gpu/drm/radeon/radeon.h | 3 ---
- drivers/gpu/drm/radeon/radeon_drv.c | 5 +----
- drivers/gpu/drm/radeon/radeon_gem.c | 7 -------
- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2 +-
- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2 +-
- drivers/gpu/drm/udl/udl_drv.c | 2 +-
- drivers/gpu/drm/udl/udl_drv.h | 2 --
- drivers/gpu/drm/udl/udl_gem.c | 6 ------
- drivers/gpu/host1x/drm/drm.c | 2 +-
- drivers/gpu/host1x/drm/gem.c | 6 ------
- drivers/gpu/host1x/drm/gem.h | 2 --
- drivers/staging/imx-drm/imx-drm-core.c | 2 +-
- include/drm/drmP.h | 3 +++
- include/drm/drm_gem_cma_helper.h | 8 --------
+ drivers/gpu/drm/ast/ast_drv.c | 2 +-
+ drivers/gpu/drm/ast/ast_drv.h | 3 ---
+ drivers/gpu/drm/ast/ast_main.c | 7 -------
+ drivers/gpu/drm/cirrus/cirrus_drv.c | 2 +-
+ drivers/gpu/drm/cirrus/cirrus_drv.h | 3 ---
+ drivers/gpu/drm/cirrus/cirrus_main.c | 7 -------
+ drivers/gpu/drm/drm_gem.c | 14 ++++++++++++++
+ drivers/gpu/drm/drm_gem_cma_helper.c | 10 ----------
+ drivers/gpu/drm/exynos/exynos_drm_drv.c | 2 +-
+ drivers/gpu/drm/exynos/exynos_drm_gem.c | 22 ----------------------
+ drivers/gpu/drm/exynos/exynos_drm_gem.h | 9 ---------
+ drivers/gpu/drm/gma500/gem.c | 17 -----------------
+ drivers/gpu/drm/gma500/psb_drv.c | 2 +-
+ drivers/gpu/drm/gma500/psb_drv.h | 2 --
+ drivers/gpu/drm/i915/i915_drv.c | 2 +-
+ drivers/gpu/drm/i915/i915_drv.h | 2 --
+ drivers/gpu/drm/i915/i915_gem.c | 7 -------
+ drivers/gpu/drm/mgag200/mgag200_drv.c | 2 +-
+ drivers/gpu/drm/mgag200/mgag200_drv.h | 3 ---
+ drivers/gpu/drm/mgag200/mgag200_main.c | 7 -------
+ drivers/gpu/drm/nouveau/nouveau_display.c | 7 -------
+ drivers/gpu/drm/nouveau/nouveau_display.h | 2 --
+ drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +-
+ drivers/gpu/drm/omapdrm/omap_drv.c | 2 +-
+ drivers/gpu/drm/omapdrm/omap_drv.h | 2 --
+ drivers/gpu/drm/omapdrm/omap_gem.c | 15 ---------------
+ drivers/gpu/drm/qxl/qxl_drv.c | 2 +-
+ drivers/gpu/drm/qxl/qxl_drv.h | 3 ---
+ drivers/gpu/drm/qxl/qxl_dumb.c | 7 -------
+ drivers/gpu/drm/radeon/radeon.h | 3 ---
+ drivers/gpu/drm/radeon/radeon_drv.c | 5 +----
+ drivers/gpu/drm/radeon/radeon_gem.c | 7 -------
+ drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2 +-
+ drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2 +-
+ drivers/gpu/drm/udl/udl_drv.c | 2 +-
+ drivers/gpu/drm/udl/udl_drv.h | 2 --
+ drivers/gpu/drm/udl/udl_gem.c | 6 ------
+ drivers/gpu/host1x/drm/drm.c | 2 +-
+ drivers/gpu/host1x/drm/gem.c | 6 ------
+ drivers/gpu/host1x/drm/gem.h | 2 --
+ drivers/staging/imx-drm/imx-drm-core.c | 2 +-
+ include/drm/drmP.h | 3 +++
+ include/drm/drm_gem_cma_helper.h | 8 --------
43 files changed, 32 insertions(+), 187 deletions(-)
-diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
-index df0d0a08097a..a144fb044852 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -216,7 +216,7 @@ static struct drm_driver driver = {
};
-diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
-index b6b7d70f2832..68e1d324005a 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
extern int ast_gem_init_object(struct drm_gem_object *obj);
extern void ast_gem_free_object(struct drm_gem_object *obj);
-diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
-index c195dc2abc09..7f6152d374ca 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
-@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
+@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *fil
return 0;
}
int ast_gem_init_object(struct drm_gem_object *obj)
{
BUG();
-diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
-index 8ecb601152ef..d35d99c15f84 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -102,7 +102,7 @@ static struct drm_driver driver = {
};
static struct pci_driver cirrus_pci_driver = {
-diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
-index 7ca059596887..33a0f991b0fc 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
-@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
+@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device
int cirrus_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int cirrus_framebuffer_init(struct drm_device *dev,
struct cirrus_framebuffer *gfb,
-diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
-index 3a7a0efe3675..f130a533a512 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
-@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
+@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *
return 0;
}
int cirrus_gem_init_object(struct drm_gem_object *obj)
{
BUG();
-diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
-index 2688795172f9..ee9ddc856710 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
-@@ -244,6 +244,20 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+@@ -244,6 +244,20 @@ drm_gem_handle_delete(struct drm_file *f
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
-+ *
++ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
-diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
-index 11b616ef9dc2..3ec218376734 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
-@@ -235,16 +235,6 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+@@ -284,16 +284,6 @@ int drm_gem_cma_mmap(struct file *filp,
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
#ifdef CONFIG_DEBUG_FS
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
{
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
-index ba6d995e4375..1ff89aca1fed 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
-@@ -276,7 +276,7 @@ static struct drm_driver exynos_drm_driver = {
+@@ -276,7 +276,7 @@ static struct drm_driver exynos_drm_driv
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = exynos_dmabuf_prime_export,
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
-index 408b71f4c95e..e83930fdf6c7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -735,28 +735,6 @@ unlock:
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
-index 468766bee450..09555afdfe9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
-@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struc
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
/* page fault handler and mmap fault address(virtual) to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
-index 2f77bea30b11..10ae8c52d06f 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
-@@ -162,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+@@ -162,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file
}
/**
* psb_gem_fault - pagefault handler for GEM objects
* @vma: the VMA of the GEM object
* @vmf: fault detail
-diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
-index bddea5807442..ed06d5ce3757 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -652,7 +652,7 @@ static struct drm_driver driver = {
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
-diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
-index 6053b8abcd12..984cacfcbaf2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
-@@ -838,8 +838,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+@@ -838,8 +838,6 @@ extern int psb_gem_get_aperture(struct d
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index 01d63a0435fb..13457e3e9cad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1038,7 +1038,7 @@ static struct drm_driver driver = {
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 62ec760782f5..06c31752fcb2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -1775,8 +1775,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
+@@ -1775,8 +1775,6 @@ int i915_gem_dumb_create(struct drm_file
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index d31e15dd173c..967fe650fa8b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -245,13 +245,6 @@ i915_gem_dumb_create(struct drm_file *file,
+@@ -245,13 +245,6 @@ i915_gem_dumb_create(struct drm_file *fi
args->size, &args->handle);
}
/**
* Creates a new mm object and returns a handle to it.
*/
-diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
-index 122b571ccc7c..bd9196478735 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -104,7 +104,7 @@ static struct drm_driver driver = {
};
static struct pci_driver mgag200_pci_driver = {
-diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
-index 988911afcc8b..e61ce34910d6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
-@@ -248,9 +248,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
+@@ -248,9 +248,6 @@ int mgag200_gem_init_object(struct drm_g
int mgag200_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
void mgag200_gem_free_object(struct drm_gem_object *obj);
int
mgag200_dumb_mmap_offset(struct drm_file *file,
-diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
-index 2d56e28d2b21..4529d4dd12c2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
-@@ -291,13 +291,6 @@ int mgag200_dumb_create(struct drm_file *file,
+@@ -291,13 +291,6 @@ int mgag200_dumb_create(struct drm_file
return 0;
}
int mgag200_gem_init_object(struct drm_gem_object *obj)
{
BUG();
-diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
-index 52498de87a3b..05ae27277543 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
-@@ -689,13 +689,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+@@ -689,13 +689,6 @@ nouveau_display_dumb_create(struct drm_f
}
int
nouveau_display_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *poffset)
-diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
-index 1ea3e4734b62..185e74132a6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
-@@ -68,8 +68,6 @@ int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+@@ -68,8 +68,6 @@ int nouveau_display_dumb_create(struct
struct drm_mode_create_dumb *args);
int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
u32 handle, u64 *offset);
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
-diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
-index 383f4e6ea9d1..b77bcb9237e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -714,7 +714,7 @@ driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
-diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
-index 826586ffbe83..75886a3bf639 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
-@@ -618,7 +618,7 @@ static struct drm_driver omap_drm_driver = {
+@@ -618,7 +618,7 @@ static struct drm_driver omap_drm_driver
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
.ioctls = ioctls,
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
.fops = &omapdriver_fops,
-diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
-index 215a20dd340c..fd13601ff6fb 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
-@@ -224,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
+@@ -224,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_
void *omap_gem_vaddr(struct drm_gem_object *obj);
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
-index f90531fc00c9..b1f19702550f 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
-@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file
}
/**
* omap_gem_dumb_map - buffer mapping for dumb interface
* @file: our drm client file
* @dev: drm device
-diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
-index aa291d8a98a2..60cb159c4f7d 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -99,7 +99,7 @@ static struct drm_driver qxl_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
.debugfs_cleanup = qxl_debugfs_takedown,
-diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
-index 43d06ab28a21..089fd42802dd 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
-@@ -409,9 +409,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+@@ -409,9 +409,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void
int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int qxl_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
-index 847c4ee798f7..d34bb4130ff0 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
-@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file
return 0;
}
int qxl_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
-diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
-index d4ff48ce1d8b..0fbc44e468da 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
-@@ -444,9 +444,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
+@@ -444,9 +444,6 @@ int radeon_mode_dumb_create(struct drm_f
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
/*
* Semaphores.
-diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
-index 094e7e5ea39e..bef72931ea08 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
-@@ -119,9 +119,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
+@@ -119,9 +119,6 @@ int radeon_mode_dumb_mmap(struct drm_fil
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
.fops = &radeon_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
-index aa796031ab65..dce99c8a5835 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
-@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
+@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_f
return 0;
}
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
-diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
-index f6e0b5395051..946bd28bf5da 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
-@@ -285,7 +285,7 @@ static struct drm_driver shmob_drm_driver = {
+@@ -285,7 +285,7 @@ static struct drm_driver shmob_drm_drive
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
-diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
-index 2b5461bcd9fb..bba8daf9230c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
-@@ -490,7 +490,7 @@ static struct drm_driver tilcdc_driver = {
+@@ -490,7 +490,7 @@ static struct drm_driver tilcdc_driver =
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
.debugfs_cleanup = tilcdc_debugfs_cleanup,
-diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
-index c0770dbba74a..bb0af58c769a 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -84,7 +84,7 @@ static struct drm_driver driver = {
.fops = &udl_driver_fops,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
-index cc6d90f28c71..56aec9409fa3 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
-@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
+@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *fil
struct drm_mode_create_dumb *args);
int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
-diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
-index 2a4cb2f83b36..b5e3b8038253 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
-@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
+@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *fil
args->size, &args->handle);
}
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
-diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
-index 2b561c9118c6..da15a6291bb9 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -625,7 +625,7 @@ struct drm_driver tegra_drm_driver = {
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
-diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
-index bc323b3dbe4d..3c35622c9f15 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/host1x/drm/gem.c
-@@ -261,9 +261,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+@@ -261,9 +261,3 @@ int tegra_drm_mmap(struct file *file, st
return ret;
}
-{
- return drm_gem_handle_delete(file, handle);
-}
-diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
-index 34de2b486eb7..2e93b0379da8 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/host1x/drm/gem.h
-@@ -49,8 +49,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+@@ -49,8 +49,6 @@ int tegra_bo_dumb_create(struct drm_file
struct drm_mode_create_dumb *args);
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
uint32_t handle, uint64_t *offset);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
-diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
-index a532ca568526..a18622570812 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
-@@ -801,7 +801,7 @@ static struct drm_driver imx_drm_driver = {
+@@ -801,7 +801,7 @@ static struct drm_driver imx_drm_driver
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = imx_drm_enable_vblank,
-diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index bf677c0b4cae..9a8ea57e3b94 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
-@@ -1589,6 +1589,9 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **
+@@ -1589,6 +1589,9 @@ extern int drm_prime_sg_to_page_addr_arr
extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
-index 63397ced9254..632a6c50fab7 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
-@@ -27,14 +27,6 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+@@ -30,14 +30,6 @@ int drm_gem_cma_dumb_map_offset(struct d
/* set vm_flags and we can change the vm attribute to other one at here. */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
/* allocate physical memory. */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
unsigned int size);
---
-1.8.5.rc3
-
drivers/dma/dw/Makefile | 1 +
drivers/dma/{ => dw}/dw_dmac.c | 2 +-
drivers/dma/{ => dw}/dw_dmac_regs.h | 0
- 7 files changed, 28 insertions(+), 23 deletions(-)
+ MAINTAINERS | 3
+ drivers/dma/Kconfig | 20
+ drivers/dma/Makefile | 2
+ drivers/dma/dw/Kconfig | 23
+ drivers/dma/dw/Makefile | 1
+ drivers/dma/dw/dw_dmac.c | 1969 ++++++++++++++++++++++++++++++++++++++++++
+ drivers/dma/dw/dw_dmac_regs.h | 311 ++++++
+ drivers/dma/dw_dmac.c | 1969 ------------------------------------------
+ drivers/dma/dw_dmac_regs.h | 311 ------
+ 9 files changed, 2307 insertions(+), 2302 deletions(-)
create mode 100644 drivers/dma/dw/Kconfig
create mode 100644 drivers/dma/dw/Makefile
rename drivers/dma/{ => dw}/dw_dmac.c (99%)
rename drivers/dma/{ => dw}/dw_dmac_regs.h (100%)
-diff --git a/MAINTAINERS b/MAINTAINERS
-index 30287b8a223a..124d32cae616 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
-@@ -6991,8 +6991,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
+@@ -6998,8 +6998,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
M: Viresh Kumar <viresh.linux@gmail.com>
S: Maintained
F: include/linux/dw_dmac.h
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Seungwon Jeon <tgih.jun@samsung.com>
-diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
-index e9924898043a..146a1d864a71 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -79,25 +79,7 @@ config INTEL_IOP_ADMA
config AT_HDMAC
tristate "Atmel AHB DMA support"
-diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
-index a2b0df591f95..ac44ca0d468a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
-new file mode 100644
-index 000000000000..38a215af5ccc
--- /dev/null
+++ b/drivers/dma/dw/Kconfig
@@ -0,0 +1,23 @@
+ like the Atmel AVR32 architecture.
+
+ If unsure, use the default setting.
-diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
-new file mode 100644
-index 000000000000..dd8d9936beef
--- /dev/null
+++ b/drivers/dma/dw/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DW_DMAC) += dw_dmac.o
-diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw/dw_dmac.c
-similarity index 99%
-rename from drivers/dma/dw_dmac.c
-rename to drivers/dma/dw/dw_dmac.c
-index 2b65ba614e60..15f3f4f79c10 100644
---- a/drivers/dma/dw_dmac.c
+--- /dev/null
+++ b/drivers/dma/dw/dw_dmac.c
-@@ -28,8 +28,8 @@
- #include <linux/acpi.h>
- #include <linux/acpi_dma.h>
-
+@@ -0,0 +1,1969 @@
++/*
++ * Core driver for the Synopsys DesignWare DMA Controller
++ *
++ * Copyright (C) 2007-2008 Atmel Corporation
++ * Copyright (C) 2010-2011 ST Microelectronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/bitops.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_dma.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/acpi.h>
++#include <linux/acpi_dma.h>
++
+#include "../dmaengine.h"
- #include "dw_dmac_regs.h"
--#include "dmaengine.h"
-
- /*
- * This supports the Synopsys "DesignWare AHB Central DMA Controller",
-diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw/dw_dmac_regs.h
-similarity index 100%
-rename from drivers/dma/dw_dmac_regs.h
-rename to drivers/dma/dw/dw_dmac_regs.h
---
-1.8.5.rc3
-
++#include "dw_dmac_regs.h"
++
++/*
++ * This supports the Synopsys "DesignWare AHB Central DMA Controller",
++ * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
++ * of which use ARM any more). See the "Databook" from Synopsys for
++ * information beyond what licensees probably provide.
++ *
++ * The driver has currently been tested only with the Atmel AT32AP7000,
++ * which does not support descriptor writeback.
++ */
++
++static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
++{
++ return slave ? slave->dst_master : 0;
++}
++
++static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
++{
++ return slave ? slave->src_master : 1;
++}
++
++static inline void dwc_set_masters(struct dw_dma_chan *dwc)
++{
++ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++ struct dw_dma_slave *dws = dwc->chan.private;
++ unsigned char mmax = dw->nr_masters - 1;
++
++ if (dwc->request_line == ~0) {
++ dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
++ dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
++ }
++}
++
++#define DWC_DEFAULT_CTLLO(_chan) ({ \
++ struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
++ struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
++ bool _is_slave = is_slave_direction(_dwc->direction); \
++ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
++ DW_DMA_MSIZE_16; \
++ u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
++ DW_DMA_MSIZE_16; \
++ \
++ (DWC_CTLL_DST_MSIZE(_dmsize) \
++ | DWC_CTLL_SRC_MSIZE(_smsize) \
++ | DWC_CTLL_LLP_D_EN \
++ | DWC_CTLL_LLP_S_EN \
++ | DWC_CTLL_DMS(_dwc->dst_master) \
++ | DWC_CTLL_SMS(_dwc->src_master)); \
++ })
++
++/*
++ * Number of descriptors to allocate for each channel. This should be
++ * made configurable somehow; preferably, the clients (at least the
++ * ones using slave transfers) should be able to give us a hint.
++ */
++#define NR_DESCS_PER_CHANNEL 64
++
++/*----------------------------------------------------------------------*/
++
++static struct device *chan2dev(struct dma_chan *chan)
++{
++ return &chan->dev->device;
++}
++static struct device *chan2parent(struct dma_chan *chan)
++{
++ return chan->dev->device.parent;
++}
++
++static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
++{
++ return to_dw_desc(dwc->active_list.next);
++}
++
++static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
++{
++ struct dw_desc *desc, *_desc;
++ struct dw_desc *ret = NULL;
++ unsigned int i = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
++ i++;
++ if (async_tx_test_ack(&desc->txd)) {
++ list_del(&desc->desc_node);
++ ret = desc;
++ break;
++ }
++ dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
++ }
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
++
++ return ret;
++}
++
++/*
++ * Move a descriptor, including any children, to the free list.
++ * `desc' must not be on any lists.
++ */
++static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
++{
++ unsigned long flags;
++
++ if (desc) {
++ struct dw_desc *child;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ list_for_each_entry(child, &desc->tx_list, desc_node)
++ dev_vdbg(chan2dev(&dwc->chan),
++ "moving child desc %p to freelist\n",
++ child);
++ list_splice_init(&desc->tx_list, &dwc->free_list);
++ dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
++ list_add(&desc->desc_node, &dwc->free_list);
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ }
++}
++
++static void dwc_initialize(struct dw_dma_chan *dwc)
++{
++ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++ struct dw_dma_slave *dws = dwc->chan.private;
++ u32 cfghi = DWC_CFGH_FIFO_MODE;
++ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
++
++ if (dwc->initialized == true)
++ return;
++
++ if (dws) {
++ /*
++ * We need controller-specific data to set up slave
++ * transfers.
++ */
++ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
++
++ cfghi = dws->cfg_hi;
++ cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
++ } else {
++ if (dwc->direction == DMA_MEM_TO_DEV)
++ cfghi = DWC_CFGH_DST_PER(dwc->request_line);
++ else if (dwc->direction == DMA_DEV_TO_MEM)
++ cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
++ }
++
++ channel_writel(dwc, CFG_LO, cfglo);
++ channel_writel(dwc, CFG_HI, cfghi);
++
++ /* Enable interrupts */
++ channel_set_bit(dw, MASK.XFER, dwc->mask);
++ channel_set_bit(dw, MASK.ERROR, dwc->mask);
++
++ dwc->initialized = true;
++}
++
++/*----------------------------------------------------------------------*/
++
++static inline unsigned int dwc_fast_fls(unsigned long long v)
++{
++ /*
++ * We can be a lot more clever here, but this should take care
++ * of the most common optimization.
++ */
++ if (!(v & 7))
++ return 3;
++ else if (!(v & 3))
++ return 2;
++ else if (!(v & 1))
++ return 1;
++ return 0;
++}
++
++static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
++{
++ dev_err(chan2dev(&dwc->chan),
++ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
++ channel_readl(dwc, SAR),
++ channel_readl(dwc, DAR),
++ channel_readl(dwc, LLP),
++ channel_readl(dwc, CTL_HI),
++ channel_readl(dwc, CTL_LO));
++}
++
++static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
++{
++ channel_clear_bit(dw, CH_EN, dwc->mask);
++ while (dma_readl(dw, CH_EN) & dwc->mask)
++ cpu_relax();
++}
++
++/*----------------------------------------------------------------------*/
++
++/* Perform single block transfer */
++static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
++ struct dw_desc *desc)
++{
++ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++ u32 ctllo;
++
++ /* Software emulation of LLP mode relies on interrupts to continue
++ * multi block transfer. */
++ ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
++
++ channel_writel(dwc, SAR, desc->lli.sar);
++ channel_writel(dwc, DAR, desc->lli.dar);
++ channel_writel(dwc, CTL_LO, ctllo);
++ channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
++ channel_set_bit(dw, CH_EN, dwc->mask);
++
++ /* Move pointer to next descriptor */
++ dwc->tx_node_active = dwc->tx_node_active->next;
++}
++
++/* Called with dwc->lock held and bh disabled */
++static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
++{
++ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++ unsigned long was_soft_llp;
++
++ /* ASSERT: channel is idle */
++ if (dma_readl(dw, CH_EN) & dwc->mask) {
++ dev_err(chan2dev(&dwc->chan),
++ "BUG: Attempted to start non-idle channel\n");
++ dwc_dump_chan_regs(dwc);
++
++ /* The tasklet will hopefully advance the queue... */
++ return;
++ }
++
++ if (dwc->nollp) {
++ was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
++ &dwc->flags);
++ if (was_soft_llp) {
++ dev_err(chan2dev(&dwc->chan),
++ "BUG: Attempted to start new LLP transfer "
++ "inside ongoing one\n");
++ return;
++ }
++
++ dwc_initialize(dwc);
++
++ dwc->residue = first->total_len;
++ dwc->tx_node_active = &first->tx_list;
++
++ /* Submit first block */
++ dwc_do_single_block(dwc, first);
++
++ return;
++ }
++
++ dwc_initialize(dwc);
++
++ channel_writel(dwc, LLP, first->txd.phys);
++ channel_writel(dwc, CTL_LO,
++ DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
++ channel_writel(dwc, CTL_HI, 0);
++ channel_set_bit(dw, CH_EN, dwc->mask);
++}
++
++/*----------------------------------------------------------------------*/
++
++static void
++dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
++ bool callback_required)
++{
++ dma_async_tx_callback callback = NULL;
++ void *param = NULL;
++ struct dma_async_tx_descriptor *txd = &desc->txd;
++ struct dw_desc *child;
++ unsigned long flags;
++
++ dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ dma_cookie_complete(txd);
++ if (callback_required) {
++ callback = txd->callback;
++ param = txd->callback_param;
++ }
++
++ /* async_tx_ack */
++ list_for_each_entry(child, &desc->tx_list, desc_node)
++ async_tx_ack(&child->txd);
++ async_tx_ack(&desc->txd);
++
++ list_splice_init(&desc->tx_list, &dwc->free_list);
++ list_move(&desc->desc_node, &dwc->free_list);
++
++ if (!is_slave_direction(dwc->direction)) {
++ struct device *parent = chan2parent(&dwc->chan);
++ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
++ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
++ dma_unmap_single(parent, desc->lli.dar,
++ desc->total_len, DMA_FROM_DEVICE);
++ else
++ dma_unmap_page(parent, desc->lli.dar,
++ desc->total_len, DMA_FROM_DEVICE);
++ }
++ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
++ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
++ dma_unmap_single(parent, desc->lli.sar,
++ desc->total_len, DMA_TO_DEVICE);
++ else
++ dma_unmap_page(parent, desc->lli.sar,
++ desc->total_len, DMA_TO_DEVICE);
++ }
++ }
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ if (callback)
++ callback(param);
++}
++
++static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
++{
++ struct dw_desc *desc, *_desc;
++ LIST_HEAD(list);
++ unsigned long flags;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ if (dma_readl(dw, CH_EN) & dwc->mask) {
++ dev_err(chan2dev(&dwc->chan),
++ "BUG: XFER bit set, but channel not idle!\n");
++
++ /* Try to continue after resetting the channel... */
++ dwc_chan_disable(dw, dwc);
++ }
++
++ /*
++ * Submit queued descriptors ASAP, i.e. before we go through
++ * the completed ones.
++ */
++ list_splice_init(&dwc->active_list, &list);
++ if (!list_empty(&dwc->queue)) {
++ list_move(dwc->queue.next, &dwc->active_list);
++ dwc_dostart(dwc, dwc_first_active(dwc));
++ }
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ list_for_each_entry_safe(desc, _desc, &list, desc_node)
++ dwc_descriptor_complete(dwc, desc, true);
++}
++
++/* Returns how many bytes were already received from source */
++static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
++{
++ u32 ctlhi = channel_readl(dwc, CTL_HI);
++ u32 ctllo = channel_readl(dwc, CTL_LO);
++
++ return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
++}
++
++static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
++{
++ dma_addr_t llp;
++ struct dw_desc *desc, *_desc;
++ struct dw_desc *child;
++ u32 status_xfer;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ llp = channel_readl(dwc, LLP);
++ status_xfer = dma_readl(dw, RAW.XFER);
++
++ if (status_xfer & dwc->mask) {
++ /* Everything we've submitted is done */
++ dma_writel(dw, CLEAR.XFER, dwc->mask);
++
++ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
++ struct list_head *head, *active = dwc->tx_node_active;
++
++ /*
++ * We are inside first active descriptor.
++ * Otherwise something is really wrong.
++ */
++ desc = dwc_first_active(dwc);
++
++ head = &desc->tx_list;
++ if (active != head) {
++ /* Update desc to reflect last sent one */
++ if (active != head->next)
++ desc = to_dw_desc(active->prev);
++
++ dwc->residue -= desc->len;
++
++ child = to_dw_desc(active);
++
++ /* Submit next block */
++ dwc_do_single_block(dwc, child);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return;
++ }
++
++ /* We are done here */
++ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
++ }
++
++ dwc->residue = 0;
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ dwc_complete_all(dw, dwc);
++ return;
++ }
++
++ if (list_empty(&dwc->active_list)) {
++ dwc->residue = 0;
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return;
++ }
++
++ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
++ dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return;
++ }
++
++ dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
++ (unsigned long long)llp);
++
++ list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
++ /* Initial residue value */
++ dwc->residue = desc->total_len;
++
++ /* Check first descriptors addr */
++ if (desc->txd.phys == llp) {
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return;
++ }
++
++ /* Check first descriptors llp */
++ if (desc->lli.llp == llp) {
++ /* This one is currently in progress */
++ dwc->residue -= dwc_get_sent(dwc);
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return;
++ }
++
++ dwc->residue -= desc->len;
++ list_for_each_entry(child, &desc->tx_list, desc_node) {
++ if (child->lli.llp == llp) {
++ /* Currently in progress */
++ dwc->residue -= dwc_get_sent(dwc);
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return;
++ }
++ dwc->residue -= child->len;
++ }
++
++ /*
++ * No descriptors so far seem to be in progress, i.e.
++ * this one must be done.
++ */
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ dwc_descriptor_complete(dwc, desc, true);
++ spin_lock_irqsave(&dwc->lock, flags);
++ }
++
++ dev_err(chan2dev(&dwc->chan),
++ "BUG: All descriptors done, but channel not idle!\n");
++
++ /* Try to continue after resetting the channel... */
++ dwc_chan_disable(dw, dwc);
++
++ if (!list_empty(&dwc->queue)) {
++ list_move(dwc->queue.next, &dwc->active_list);
++ dwc_dostart(dwc, dwc_first_active(dwc));
++ }
++ spin_unlock_irqrestore(&dwc->lock, flags);
++}
++
++static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
++{
++ dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
++ lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
++}
++
++static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
++{
++ struct dw_desc *bad_desc;
++ struct dw_desc *child;
++ unsigned long flags;
++
++ dwc_scan_descriptors(dw, dwc);
++
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ /*
++ * The descriptor currently at the head of the active list is
++ * borked. Since we don't have any way to report errors, we'll
++ * just have to scream loudly and try to carry on.
++ */
++ bad_desc = dwc_first_active(dwc);
++ list_del_init(&bad_desc->desc_node);
++ list_move(dwc->queue.next, dwc->active_list.prev);
++
++ /* Clear the error flag and try to restart the controller */
++ dma_writel(dw, CLEAR.ERROR, dwc->mask);
++ if (!list_empty(&dwc->active_list))
++ dwc_dostart(dwc, dwc_first_active(dwc));
++
++ /*
++ * WARN may seem harsh, but since this only happens
++ * when someone submits a bad physical address in a
++ * descriptor, we should consider ourselves lucky that the
++ * controller flagged an error instead of scribbling over
++ * random memory locations.
++ */
++ dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
++ " cookie: %d\n", bad_desc->txd.cookie);
++ dwc_dump_lli(dwc, &bad_desc->lli);
++ list_for_each_entry(child, &bad_desc->tx_list, desc_node)
++ dwc_dump_lli(dwc, &child->lli);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ /* Pretend the descriptor completed successfully */
++ dwc_descriptor_complete(dwc, bad_desc, true);
++}
++
++/* --------------------- Cyclic DMA API extensions -------------------- */
++
++dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ return channel_readl(dwc, SAR);
++}
++EXPORT_SYMBOL(dw_dma_get_src_addr);
++
++dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ return channel_readl(dwc, DAR);
++}
++EXPORT_SYMBOL(dw_dma_get_dst_addr);
++
++/* Called with dwc->lock held and all DMAC interrupts disabled */
++static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
++ u32 status_err, u32 status_xfer)
++{
++ unsigned long flags;
++
++ if (dwc->mask) {
++ void (*callback)(void *param);
++ void *callback_param;
++
++ dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
++ channel_readl(dwc, LLP));
++
++ callback = dwc->cdesc->period_callback;
++ callback_param = dwc->cdesc->period_callback_param;
++
++ if (callback)
++ callback(callback_param);
++ }
++
++ /*
++ * Error and transfer complete are highly unlikely, and will most
++ * likely be due to a configuration error by the user.
++ */
++ if (unlikely(status_err & dwc->mask) ||
++ unlikely(status_xfer & dwc->mask)) {
++ int i;
++
++ dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
++ "interrupt, stopping DMA transfer\n",
++ status_xfer ? "xfer" : "error");
++
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ dwc_dump_chan_regs(dwc);
++
++ dwc_chan_disable(dw, dwc);
++
++ /* Make sure DMA does not restart by loading a new list */
++ channel_writel(dwc, LLP, 0);
++ channel_writel(dwc, CTL_LO, 0);
++ channel_writel(dwc, CTL_HI, 0);
++
++ dma_writel(dw, CLEAR.ERROR, dwc->mask);
++ dma_writel(dw, CLEAR.XFER, dwc->mask);
++
++ for (i = 0; i < dwc->cdesc->periods; i++)
++ dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ }
++}
++
++/* ------------------------------------------------------------------------- */
++
++static void dw_dma_tasklet(unsigned long data)
++{
++ struct dw_dma *dw = (struct dw_dma *)data;
++ struct dw_dma_chan *dwc;
++ u32 status_xfer;
++ u32 status_err;
++ int i;
++
++ status_xfer = dma_readl(dw, RAW.XFER);
++ status_err = dma_readl(dw, RAW.ERROR);
++
++ dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
++
++ for (i = 0; i < dw->dma.chancnt; i++) {
++ dwc = &dw->chan[i];
++ if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
++ dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
++ else if (status_err & (1 << i))
++ dwc_handle_error(dw, dwc);
++ else if (status_xfer & (1 << i))
++ dwc_scan_descriptors(dw, dwc);
++ }
++
++ /*
++ * Re-enable interrupts.
++ */
++ channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
++ channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
++}
++
++static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
++{
++ struct dw_dma *dw = dev_id;
++ u32 status;
++
++ dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
++ dma_readl(dw, STATUS_INT));
++
++ /*
++ * Just disable the interrupts. We'll turn them back on in the
++ * softirq handler.
++ */
++ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
++
++ status = dma_readl(dw, STATUS_INT);
++ if (status) {
++ dev_err(dw->dma.dev,
++ "BUG: Unexpected interrupts pending: 0x%x\n",
++ status);
++
++ /* Try to recover */
++ channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
++ channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
++ channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
++ channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
++ }
++
++ tasklet_schedule(&dw->tasklet);
++
++ return IRQ_HANDLED;
++}
++
++/*----------------------------------------------------------------------*/
++
++static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++ struct dw_desc *desc = txd_to_dw_desc(tx);
++ struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
++ dma_cookie_t cookie;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ cookie = dma_cookie_assign(tx);
++
++ /*
++ * REVISIT: We should attempt to chain as many descriptors as
++ * possible, perhaps even appending to those already submitted
++ * for DMA. But this is hard to do in a race-free manner.
++ */
++ if (list_empty(&dwc->active_list)) {
++ dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
++ desc->txd.cookie);
++ list_add_tail(&desc->desc_node, &dwc->active_list);
++ dwc_dostart(dwc, dwc_first_active(dwc));
++ } else {
++ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
++ desc->txd.cookie);
++
++ list_add_tail(&desc->desc_node, &dwc->queue);
++ }
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ return cookie;
++}
++
++static struct dma_async_tx_descriptor *
++dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
++ size_t len, unsigned long flags)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ struct dw_desc *desc;
++ struct dw_desc *first;
++ struct dw_desc *prev;
++ size_t xfer_count;
++ size_t offset;
++ unsigned int src_width;
++ unsigned int dst_width;
++ unsigned int data_width;
++ u32 ctllo;
++
++ dev_vdbg(chan2dev(chan),
++ "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
++ (unsigned long long)dest, (unsigned long long)src,
++ len, flags);
++
++ if (unlikely(!len)) {
++ dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
++ return NULL;
++ }
++
++ dwc->direction = DMA_MEM_TO_MEM;
++
++ data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
++ dw->data_width[dwc->dst_master]);
++
++ src_width = dst_width = min_t(unsigned int, data_width,
++ dwc_fast_fls(src | dest | len));
++
++ ctllo = DWC_DEFAULT_CTLLO(chan)
++ | DWC_CTLL_DST_WIDTH(dst_width)
++ | DWC_CTLL_SRC_WIDTH(src_width)
++ | DWC_CTLL_DST_INC
++ | DWC_CTLL_SRC_INC
++ | DWC_CTLL_FC_M2M;
++ prev = first = NULL;
++
++ for (offset = 0; offset < len; offset += xfer_count << src_width) {
++ xfer_count = min_t(size_t, (len - offset) >> src_width,
++ dwc->block_size);
++
++ desc = dwc_desc_get(dwc);
++ if (!desc)
++ goto err_desc_get;
++
++ desc->lli.sar = src + offset;
++ desc->lli.dar = dest + offset;
++ desc->lli.ctllo = ctllo;
++ desc->lli.ctlhi = xfer_count;
++ desc->len = xfer_count << src_width;
++
++ if (!first) {
++ first = desc;
++ } else {
++ prev->lli.llp = desc->txd.phys;
++ list_add_tail(&desc->desc_node,
++ &first->tx_list);
++ }
++ prev = desc;
++ }
++
++ if (flags & DMA_PREP_INTERRUPT)
++ /* Trigger interrupt after last block */
++ prev->lli.ctllo |= DWC_CTLL_INT_EN;
++
++ prev->lli.llp = 0;
++ first->txd.flags = flags;
++ first->total_len = len;
++
++ return &first->txd;
++
++err_desc_get:
++ dwc_desc_put(dwc, first);
++ return NULL;
++}
++
++static struct dma_async_tx_descriptor *
++dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
++ unsigned int sg_len, enum dma_transfer_direction direction,
++ unsigned long flags, void *context)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
++ struct dw_desc *prev;
++ struct dw_desc *first;
++ u32 ctllo;
++ dma_addr_t reg;
++ unsigned int reg_width;
++ unsigned int mem_width;
++ unsigned int data_width;
++ unsigned int i;
++ struct scatterlist *sg;
++ size_t total_len = 0;
++
++ dev_vdbg(chan2dev(chan), "%s\n", __func__);
++
++ if (unlikely(!is_slave_direction(direction) || !sg_len))
++ return NULL;
++
++ dwc->direction = direction;
++
++ prev = first = NULL;
++
++ switch (direction) {
++ case DMA_MEM_TO_DEV:
++ reg_width = __fls(sconfig->dst_addr_width);
++ reg = sconfig->dst_addr;
++ ctllo = (DWC_DEFAULT_CTLLO(chan)
++ | DWC_CTLL_DST_WIDTH(reg_width)
++ | DWC_CTLL_DST_FIX
++ | DWC_CTLL_SRC_INC);
++
++ ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
++ DWC_CTLL_FC(DW_DMA_FC_D_M2P);
++
++ data_width = dw->data_width[dwc->src_master];
++
++ for_each_sg(sgl, sg, sg_len, i) {
++ struct dw_desc *desc;
++ u32 len, dlen, mem;
++
++ mem = sg_dma_address(sg);
++ len = sg_dma_len(sg);
++
++ mem_width = min_t(unsigned int,
++ data_width, dwc_fast_fls(mem | len));
++
++slave_sg_todev_fill_desc:
++ desc = dwc_desc_get(dwc);
++ if (!desc) {
++ dev_err(chan2dev(chan),
++ "not enough descriptors available\n");
++ goto err_desc_get;
++ }
++
++ desc->lli.sar = mem;
++ desc->lli.dar = reg;
++ desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
++ if ((len >> mem_width) > dwc->block_size) {
++ dlen = dwc->block_size << mem_width;
++ mem += dlen;
++ len -= dlen;
++ } else {
++ dlen = len;
++ len = 0;
++ }
++
++ desc->lli.ctlhi = dlen >> mem_width;
++ desc->len = dlen;
++
++ if (!first) {
++ first = desc;
++ } else {
++ prev->lli.llp = desc->txd.phys;
++ list_add_tail(&desc->desc_node,
++ &first->tx_list);
++ }
++ prev = desc;
++ total_len += dlen;
++
++ if (len)
++ goto slave_sg_todev_fill_desc;
++ }
++ break;
++ case DMA_DEV_TO_MEM:
++ reg_width = __fls(sconfig->src_addr_width);
++ reg = sconfig->src_addr;
++ ctllo = (DWC_DEFAULT_CTLLO(chan)
++ | DWC_CTLL_SRC_WIDTH(reg_width)
++ | DWC_CTLL_DST_INC
++ | DWC_CTLL_SRC_FIX);
++
++ ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
++ DWC_CTLL_FC(DW_DMA_FC_D_P2M);
++
++ data_width = dw->data_width[dwc->dst_master];
++
++ for_each_sg(sgl, sg, sg_len, i) {
++ struct dw_desc *desc;
++ u32 len, dlen, mem;
++
++ mem = sg_dma_address(sg);
++ len = sg_dma_len(sg);
++
++ mem_width = min_t(unsigned int,
++ data_width, dwc_fast_fls(mem | len));
++
++slave_sg_fromdev_fill_desc:
++ desc = dwc_desc_get(dwc);
++ if (!desc) {
++ dev_err(chan2dev(chan),
++ "not enough descriptors available\n");
++ goto err_desc_get;
++ }
++
++ desc->lli.sar = reg;
++ desc->lli.dar = mem;
++ desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
++ if ((len >> reg_width) > dwc->block_size) {
++ dlen = dwc->block_size << reg_width;
++ mem += dlen;
++ len -= dlen;
++ } else {
++ dlen = len;
++ len = 0;
++ }
++ desc->lli.ctlhi = dlen >> reg_width;
++ desc->len = dlen;
++
++ if (!first) {
++ first = desc;
++ } else {
++ prev->lli.llp = desc->txd.phys;
++ list_add_tail(&desc->desc_node,
++ &first->tx_list);
++ }
++ prev = desc;
++ total_len += dlen;
++
++ if (len)
++ goto slave_sg_fromdev_fill_desc;
++ }
++ break;
++ default:
++ return NULL;
++ }
++
++ if (flags & DMA_PREP_INTERRUPT)
++ /* Trigger interrupt after last block */
++ prev->lli.ctllo |= DWC_CTLL_INT_EN;
++
++ prev->lli.llp = 0;
++ first->total_len = total_len;
++
++ return &first->txd;
++
++err_desc_get:
++ dwc_desc_put(dwc, first);
++ return NULL;
++}
++
++/*
++ * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
++ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
++ *
++ * NOTE: burst size 2 is not supported by controller.
++ *
++ * This can be done by finding least significant bit set: n & (n - 1)
++ */
++static inline void convert_burst(u32 *maxburst)
++{
++ if (*maxburst > 1)
++ *maxburst = fls(*maxburst) - 2;
++ else
++ *maxburst = 0;
++}
++
++static int
++set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++
++ /* Check if chan will be configured for slave transfers */
++ if (!is_slave_direction(sconfig->direction))
++ return -EINVAL;
++
++ memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
++ dwc->direction = sconfig->direction;
++
++ /* Take the request line from slave_id member */
++ if (dwc->request_line == ~0)
++ dwc->request_line = sconfig->slave_id;
++
++ convert_burst(&dwc->dma_sconfig.src_maxburst);
++ convert_burst(&dwc->dma_sconfig.dst_maxburst);
++
++ return 0;
++}
++
++static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
++{
++ u32 cfglo = channel_readl(dwc, CFG_LO);
++ unsigned int count = 20; /* timeout iterations */
++
++ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
++ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
++ udelay(2);
++
++ dwc->paused = true;
++}
++
++static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
++{
++ u32 cfglo = channel_readl(dwc, CFG_LO);
++
++ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
++
++ dwc->paused = false;
++}
++
++static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
++ unsigned long arg)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ struct dw_desc *desc, *_desc;
++ unsigned long flags;
++ LIST_HEAD(list);
++
++ if (cmd == DMA_PAUSE) {
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ dwc_chan_pause(dwc);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ } else if (cmd == DMA_RESUME) {
++ if (!dwc->paused)
++ return 0;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ dwc_chan_resume(dwc);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ } else if (cmd == DMA_TERMINATE_ALL) {
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
++
++ dwc_chan_disable(dw, dwc);
++
++ dwc_chan_resume(dwc);
++
++ /* active_list entries will end up before queued entries */
++ list_splice_init(&dwc->queue, &list);
++ list_splice_init(&dwc->active_list, &list);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ /* Flush all pending and queued descriptors */
++ list_for_each_entry_safe(desc, _desc, &list, desc_node)
++ dwc_descriptor_complete(dwc, desc, false);
++ } else if (cmd == DMA_SLAVE_CONFIG) {
++ return set_runtime_config(chan, (struct dma_slave_config *)arg);
++ } else {
++ return -ENXIO;
++ }
++
++ return 0;
++}
++
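++/*
++ * These handlers are normally reached through the generic dmaengine
++ * wrappers rather than by calling device_control() directly; a rough
++ * client-side sketch:
++ *
++ *	dmaengine_slave_config(chan, &cfg);	-> DMA_SLAVE_CONFIG
++ *	dmaengine_pause(chan);			-> DMA_PAUSE
++ *	dmaengine_resume(chan);			-> DMA_RESUME
++ *	dmaengine_terminate_all(chan);		-> DMA_TERMINATE_ALL
++ */
++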
++static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
++{
++ unsigned long flags;
++ u32 residue;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ residue = dwc->residue;
++ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
++ residue -= dwc_get_sent(dwc);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return residue;
++}
++
++static enum dma_status
++dwc_tx_status(struct dma_chan *chan,
++ dma_cookie_t cookie,
++ struct dma_tx_state *txstate)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ enum dma_status ret;
++
++ ret = dma_cookie_status(chan, cookie, txstate);
++ if (ret != DMA_SUCCESS) {
++ dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
++
++ ret = dma_cookie_status(chan, cookie, txstate);
++ }
++
++ if (ret != DMA_SUCCESS)
++ dma_set_residue(txstate, dwc_get_residue(dwc));
++
++ if (dwc->paused)
++ return DMA_PAUSED;
++
++ return ret;
++}
++
++static void dwc_issue_pending(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++
++ if (!list_empty(&dwc->queue))
++ dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
++}
++
++static int dwc_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ struct dw_desc *desc;
++ int i;
++ unsigned long flags;
++
++ dev_vdbg(chan2dev(chan), "%s\n", __func__);
++
++ /* ASSERT: channel is idle */
++ if (dma_readl(dw, CH_EN) & dwc->mask) {
++ dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
++ return -EIO;
++ }
++
++ dma_cookie_init(chan);
++
++ /*
++ * NOTE: some controllers may have additional features that we
++ * need to initialize here, like "scatter-gather" (which
++ * doesn't mean what you think it means), and status writeback.
++ */
++
++ dwc_set_masters(dwc);
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ i = dwc->descs_allocated;
++ while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
++ dma_addr_t phys;
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
++ if (!desc)
++ goto err_desc_alloc;
++
++ memset(desc, 0, sizeof(struct dw_desc));
++
++ INIT_LIST_HEAD(&desc->tx_list);
++ dma_async_tx_descriptor_init(&desc->txd, chan);
++ desc->txd.tx_submit = dwc_tx_submit;
++ desc->txd.flags = DMA_CTRL_ACK;
++ desc->txd.phys = phys;
++
++ dwc_desc_put(dwc, desc);
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ i = ++dwc->descs_allocated;
++ }
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
++
++ return i;
++
++err_desc_alloc:
++ dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
++
++ return i;
++}
++
++static void dwc_free_chan_resources(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ struct dw_desc *desc, *_desc;
++ unsigned long flags;
++ LIST_HEAD(list);
++
++ dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
++ dwc->descs_allocated);
++
++ /* ASSERT: channel is idle */
++ BUG_ON(!list_empty(&dwc->active_list));
++ BUG_ON(!list_empty(&dwc->queue));
++ BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ list_splice_init(&dwc->free_list, &list);
++ dwc->descs_allocated = 0;
++ dwc->initialized = false;
++ dwc->request_line = ~0;
++
++ /* Disable interrupts */
++ channel_clear_bit(dw, MASK.XFER, dwc->mask);
++ channel_clear_bit(dw, MASK.ERROR, dwc->mask);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ list_for_each_entry_safe(desc, _desc, &list, desc_node) {
++ dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
++ dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
++ }
++
++ dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
++}
++
++/*----------------------------------------------------------------------*/
++
++struct dw_dma_of_filter_args {
++ struct dw_dma *dw;
++ unsigned int req;
++ unsigned int src;
++ unsigned int dst;
++};
++
++static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma_of_filter_args *fargs = param;
++
++ /* Ensure the device matches our channel */
++ if (chan->device != &fargs->dw->dma)
++ return false;
++
++ dwc->request_line = fargs->req;
++ dwc->src_master = fargs->src;
++ dwc->dst_master = fargs->dst;
++
++ return true;
++}
++
++static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
++ struct of_dma *ofdma)
++{
++ struct dw_dma *dw = ofdma->of_dma_data;
++ struct dw_dma_of_filter_args fargs = {
++ .dw = dw,
++ };
++ dma_cap_mask_t cap;
++
++ if (dma_spec->args_count != 3)
++ return NULL;
++
++ fargs.req = dma_spec->args[0];
++ fargs.src = dma_spec->args[1];
++ fargs.dst = dma_spec->args[2];
++
++ if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
++ fargs.src >= dw->nr_masters ||
++ fargs.dst >= dw->nr_masters))
++ return NULL;
++
++ dma_cap_zero(cap);
++ dma_cap_set(DMA_SLAVE, cap);
++
++ /* TODO: there should be a simpler way to do this */
++ return dma_request_channel(cap, dw_dma_of_filter, &fargs);
++}
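++
++/*
++ * The translation above expects three cells per channel specifier: the
++ * hardware request line and the source/destination AHB masters. A
++ * hypothetical client node would therefore look something like:
++ *
++ *	dmas = <&dmahost 12 0 1>;
++ *	dma-names = "rx";
++ */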
++
++#ifdef CONFIG_ACPI
++static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct acpi_dma_spec *dma_spec = param;
++
++ if (chan->device->dev != dma_spec->dev ||
++ chan->chan_id != dma_spec->chan_id)
++ return false;
++
++ dwc->request_line = dma_spec->slave_id;
++ dwc->src_master = dwc_get_sms(NULL);
++ dwc->dst_master = dwc_get_dms(NULL);
++
++ return true;
++}
++
++static void dw_dma_acpi_controller_register(struct dw_dma *dw)
++{
++ struct device *dev = dw->dma.dev;
++ struct acpi_dma_filter_info *info;
++ int ret;
++
++ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
++ if (!info)
++ return;
++
++ dma_cap_zero(info->dma_cap);
++ dma_cap_set(DMA_SLAVE, info->dma_cap);
++ info->filter_fn = dw_dma_acpi_filter;
++
++ ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
++ info);
++ if (ret)
++ dev_err(dev, "could not register acpi_dma_controller\n");
++}
++#else /* !CONFIG_ACPI */
++static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
++#endif /* !CONFIG_ACPI */
++
++/* --------------------- Cyclic DMA API extensions -------------------- */
++
++/**
++ * dw_dma_cyclic_start - start the cyclic DMA transfer
++ * @chan: the DMA channel to start
++ *
++ * Must be called with soft interrupts disabled. Returns zero on success or
++ * -errno on failure.
++ */
++int dw_dma_cyclic_start(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++ unsigned long flags;
++
++ if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
++ dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
++ return -ENODEV;
++ }
++
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ /* Assert channel is idle */
++ if (dma_readl(dw, CH_EN) & dwc->mask) {
++ dev_err(chan2dev(&dwc->chan),
++ "BUG: Attempted to start non-idle channel\n");
++ dwc_dump_chan_regs(dwc);
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return -EBUSY;
++ }
++
++ dma_writel(dw, CLEAR.ERROR, dwc->mask);
++ dma_writel(dw, CLEAR.XFER, dwc->mask);
++
++ /* Setup DMAC channel registers */
++ channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
++ channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
++ channel_writel(dwc, CTL_HI, 0);
++
++ channel_set_bit(dw, CH_EN, dwc->mask);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(dw_dma_cyclic_start);
++
++/**
++ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
++ * @chan: the DMA channel to stop
++ *
++ * Must be called with soft interrupts disabled.
++ */
++void dw_dma_cyclic_stop(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++ unsigned long flags;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ dwc_chan_disable(dw, dwc);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++}
++EXPORT_SYMBOL(dw_dma_cyclic_stop);
++
++/**
++ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
++ * @chan: the DMA channel to prepare
++ * @buf_addr: physical DMA address where the buffer starts
++ * @buf_len: total number of bytes for the entire buffer
++ * @period_len: number of bytes for each period
++ * @direction: transfer direction, to or from device
++ *
++ * Must be called before trying to start the transfer. Returns a valid struct
++ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
++ */
++struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
++ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
++ enum dma_transfer_direction direction)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
++ struct dw_cyclic_desc *cdesc;
++ struct dw_cyclic_desc *retval = NULL;
++ struct dw_desc *desc;
++ struct dw_desc *last = NULL;
++ unsigned long was_cyclic;
++ unsigned int reg_width;
++ unsigned int periods;
++ unsigned int i;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++ if (dwc->nollp) {
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ dev_dbg(chan2dev(&dwc->chan),
++ "channel doesn't support LLP transfers\n");
++ return ERR_PTR(-EINVAL);
++ }
++
++ if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ dev_dbg(chan2dev(&dwc->chan),
++ "queue and/or active list are not empty\n");
++ return ERR_PTR(-EBUSY);
++ }
++
++ was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ if (was_cyclic) {
++ dev_dbg(chan2dev(&dwc->chan),
++ "channel already prepared for cyclic DMA\n");
++ return ERR_PTR(-EBUSY);
++ }
++
++ retval = ERR_PTR(-EINVAL);
++
++ if (unlikely(!is_slave_direction(direction)))
++ goto out_err;
++
++ dwc->direction = direction;
++
++ if (direction == DMA_MEM_TO_DEV)
++ reg_width = __ffs(sconfig->dst_addr_width);
++ else
++ reg_width = __ffs(sconfig->src_addr_width);
++
++ periods = buf_len / period_len;
++
++ /* Check for too big/unaligned periods and unaligned DMA buffer. */
++ if (period_len > (dwc->block_size << reg_width))
++ goto out_err;
++ if (unlikely(period_len & ((1 << reg_width) - 1)))
++ goto out_err;
++ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
++ goto out_err;
++
++ retval = ERR_PTR(-ENOMEM);
++
++ if (periods > NR_DESCS_PER_CHANNEL)
++ goto out_err;
++
++ cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
++ if (!cdesc)
++ goto out_err;
++
++ cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
++ if (!cdesc->desc)
++ goto out_err_alloc;
++
++ for (i = 0; i < periods; i++) {
++ desc = dwc_desc_get(dwc);
++ if (!desc)
++ goto out_err_desc_get;
++
++ switch (direction) {
++ case DMA_MEM_TO_DEV:
++ desc->lli.dar = sconfig->dst_addr;
++ desc->lli.sar = buf_addr + (period_len * i);
++ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
++ | DWC_CTLL_DST_WIDTH(reg_width)
++ | DWC_CTLL_SRC_WIDTH(reg_width)
++ | DWC_CTLL_DST_FIX
++ | DWC_CTLL_SRC_INC
++ | DWC_CTLL_INT_EN);
++
++ desc->lli.ctllo |= sconfig->device_fc ?
++ DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
++ DWC_CTLL_FC(DW_DMA_FC_D_M2P);
++
++ break;
++ case DMA_DEV_TO_MEM:
++ desc->lli.dar = buf_addr + (period_len * i);
++ desc->lli.sar = sconfig->src_addr;
++ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
++ | DWC_CTLL_SRC_WIDTH(reg_width)
++ | DWC_CTLL_DST_WIDTH(reg_width)
++ | DWC_CTLL_DST_INC
++ | DWC_CTLL_SRC_FIX
++ | DWC_CTLL_INT_EN);
++
++ desc->lli.ctllo |= sconfig->device_fc ?
++ DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
++ DWC_CTLL_FC(DW_DMA_FC_D_P2M);
++
++ break;
++ default:
++ break;
++ }
++
++ desc->lli.ctlhi = (period_len >> reg_width);
++ cdesc->desc[i] = desc;
++
++ if (last)
++ last->lli.llp = desc->txd.phys;
++
++ last = desc;
++ }
++
++ /* Let's make a cyclic list */
++ last->lli.llp = cdesc->desc[0]->txd.phys;
++
++ dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
++ "period %zu periods %d\n", (unsigned long long)buf_addr,
++ buf_len, period_len, periods);
++
++ cdesc->periods = periods;
++ dwc->cdesc = cdesc;
++
++ return cdesc;
++
++out_err_desc_get:
++ while (i--)
++ dwc_desc_put(dwc, cdesc->desc[i]);
++out_err_alloc:
++ kfree(cdesc);
++out_err:
++ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
++ return (struct dw_cyclic_desc *)retval;
++}
++EXPORT_SYMBOL(dw_dma_cyclic_prep);
++
++/**
++ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
++ * @chan: the DMA channel to free
++ */
++void dw_dma_cyclic_free(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++ struct dw_cyclic_desc *cdesc = dwc->cdesc;
++ int i;
++ unsigned long flags;
++
++ dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
++
++ if (!cdesc)
++ return;
++
++ spin_lock_irqsave(&dwc->lock, flags);
++
++ dwc_chan_disable(dw, dwc);
++
++ dma_writel(dw, CLEAR.ERROR, dwc->mask);
++ dma_writel(dw, CLEAR.XFER, dwc->mask);
++
++ spin_unlock_irqrestore(&dwc->lock, flags);
++
++ for (i = 0; i < cdesc->periods; i++)
++ dwc_desc_put(dwc, cdesc->desc[i]);
++
++ kfree(cdesc->desc);
++ kfree(cdesc);
++
++ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
++}
++EXPORT_SYMBOL(dw_dma_cyclic_free);
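++
++/*
++ * Rough usage sketch for this non-standard cyclic API (buf, buf_len and
++ * period_len are whatever the client driver chose; softirqs must be off
++ * around start/stop as documented above):
++ *
++ *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
++ *				   DMA_MEM_TO_DEV);
++ *	if (IS_ERR(cdesc))
++ *		return PTR_ERR(cdesc);
++ *	dw_dma_cyclic_start(chan);
++ *	...
++ *	dw_dma_cyclic_stop(chan);
++ *	dw_dma_cyclic_free(chan);
++ */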
++
++/*----------------------------------------------------------------------*/
++
++static void dw_dma_off(struct dw_dma *dw)
++{
++ int i;
++
++ dma_writel(dw, CFG, 0);
++
++ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++ channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
++ channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
++ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
++
++ while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
++ cpu_relax();
++
++ for (i = 0; i < dw->dma.chancnt; i++)
++ dw->chan[i].initialized = false;
++}
++
++#ifdef CONFIG_OF
++static struct dw_dma_platform_data *
++dw_dma_parse_dt(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct dw_dma_platform_data *pdata;
++ u32 tmp, arr[4];
++
++ if (!np) {
++ dev_err(&pdev->dev, "Missing DT data\n");
++ return NULL;
++ }
++
++ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
++ if (!pdata)
++ return NULL;
++
++ if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
++ return NULL;
++
++ if (of_property_read_bool(np, "is_private"))
++ pdata->is_private = true;
++
++ if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
++ pdata->chan_allocation_order = (unsigned char)tmp;
++
++ if (!of_property_read_u32(np, "chan_priority", &tmp))
++ pdata->chan_priority = tmp;
++
++ if (!of_property_read_u32(np, "block_size", &tmp))
++ pdata->block_size = tmp;
++
++ if (!of_property_read_u32(np, "dma-masters", &tmp)) {
++ if (tmp > 4)
++ return NULL;
++
++ pdata->nr_masters = tmp;
++ }
++
++ if (!of_property_read_u32_array(np, "data_width", arr,
++ pdata->nr_masters))
++ for (tmp = 0; tmp < pdata->nr_masters; tmp++)
++ pdata->data_width[tmp] = arr[tmp];
++
++ return pdata;
++}
++#else
++static inline struct dw_dma_platform_data *
++dw_dma_parse_dt(struct platform_device *pdev)
++{
++ return NULL;
++}
++#endif
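++
++/*
++ * For reference, a controller node providing the properties parsed above
++ * might look like this (all values purely illustrative):
++ *
++ *	dmahost: dma-controller@fc000000 {
++ *		compatible = "snps,dma-spear1340";
++ *		reg = <0xfc000000 0x1000>;
++ *		interrupts = <...>;
++ *		#dma-cells = <3>;
++ *		dma-channels = <8>;
++ *		dma-masters = <2>;
++ *		chan_allocation_order = <1>;
++ *		chan_priority = <1>;
++ *		block_size = <0xfff>;
++ *		data_width = <3 3>;
++ *	};
++ */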
++
++static int dw_probe(struct platform_device *pdev)
++{
++ struct dw_dma_platform_data *pdata;
++ struct resource *io;
++ struct dw_dma *dw;
++ size_t size;
++ void __iomem *regs;
++ bool autocfg;
++ unsigned int dw_params;
++ unsigned int nr_channels;
++ unsigned int max_blk_size = 0;
++ int irq;
++ int err;
++ int i;
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ return irq;
++
++ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ regs = devm_ioremap_resource(&pdev->dev, io);
++ if (IS_ERR(regs))
++ return PTR_ERR(regs);
++
++ /* Apply default dma_mask if needed */
++ if (!pdev->dev.dma_mask) {
++ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
++ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
++ }
++
++ dw_params = dma_read_byaddr(regs, DW_PARAMS);
++ autocfg = dw_params >> DW_PARAMS_EN & 0x1;
++
++ dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
++
++ pdata = dev_get_platdata(&pdev->dev);
++ if (!pdata)
++ pdata = dw_dma_parse_dt(pdev);
++
++ if (!pdata && autocfg) {
++ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
++ if (!pdata)
++ return -ENOMEM;
++
++ /* Fill platform data with the default values */
++ pdata->is_private = true;
++ pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
++ pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
++ } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
++ return -EINVAL;
++
++ if (autocfg)
++ nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
++ else
++ nr_channels = pdata->nr_channels;
++
++ size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
++ dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
++ if (!dw)
++ return -ENOMEM;
++
++ dw->clk = devm_clk_get(&pdev->dev, "hclk");
++ if (IS_ERR(dw->clk))
++ return PTR_ERR(dw->clk);
++ clk_prepare_enable(dw->clk);
++
++ dw->regs = regs;
++
++ /* Get hardware configuration parameters */
++ if (autocfg) {
++ max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
++
++ dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
++ for (i = 0; i < dw->nr_masters; i++) {
++ dw->data_width[i] =
++ (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
++ }
++ } else {
++ dw->nr_masters = pdata->nr_masters;
++ memcpy(dw->data_width, pdata->data_width, 4);
++ }
++
++ /* Calculate all channel mask before DMA setup */
++ dw->all_chan_mask = (1 << nr_channels) - 1;
++
++ /* Force dma off, just in case */
++ dw_dma_off(dw);
++
++ /* Disable BLOCK interrupts as well */
++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
++
++ err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
++ "dw_dmac", dw);
++ if (err)
++ return err;
++
++ platform_set_drvdata(pdev, dw);
++
++ /* Create a pool of consistent memory blocks for hardware descriptors */
++ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
++ sizeof(struct dw_desc), 4, 0);
++ if (!dw->desc_pool) {
++ dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
++ return -ENOMEM;
++ }
++
++ tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
++
++ INIT_LIST_HEAD(&dw->dma.channels);
++ for (i = 0; i < nr_channels; i++) {
++ struct dw_dma_chan *dwc = &dw->chan[i];
++ int r = nr_channels - i - 1;
++
++ dwc->chan.device = &dw->dma;
++ dma_cookie_init(&dwc->chan);
++ if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
++ list_add_tail(&dwc->chan.device_node,
++ &dw->dma.channels);
++ else
++ list_add(&dwc->chan.device_node, &dw->dma.channels);
++
++ /* 7 is highest priority & 0 is lowest. */
++ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
++ dwc->priority = r;
++ else
++ dwc->priority = i;
++
++ dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
++ spin_lock_init(&dwc->lock);
++ dwc->mask = 1 << i;
++
++ INIT_LIST_HEAD(&dwc->active_list);
++ INIT_LIST_HEAD(&dwc->queue);
++ INIT_LIST_HEAD(&dwc->free_list);
++
++ channel_clear_bit(dw, CH_EN, dwc->mask);
++
++ dwc->direction = DMA_TRANS_NONE;
++ dwc->request_line = ~0;
++
++ /* Hardware configuration */
++ if (autocfg) {
++ unsigned int dwc_params;
++
++ dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
++ DWC_PARAMS);
++
++ dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
++ dwc_params);
++
++ /* Decode maximum block size for given channel. The
++ * stored 4 bit value represents blocks from 0x00 for 3
++ * up to 0x0a for 4095. */
++ dwc->block_size =
++ (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
++ dwc->nollp =
++ (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
++ } else {
++ dwc->block_size = pdata->block_size;
++
++ /* Check if channel supports multi block transfer */
++ channel_writel(dwc, LLP, 0xfffffffc);
++ dwc->nollp =
++ (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
++ channel_writel(dwc, LLP, 0);
++ }
++ }
++
++ /* Clear all interrupts on all channels. */
++ dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
++ dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
++ dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
++ dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
++ dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
++
++ dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
++ dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
++ if (pdata->is_private)
++ dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
++ dw->dma.dev = &pdev->dev;
++ dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
++ dw->dma.device_free_chan_resources = dwc_free_chan_resources;
++
++ dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
++
++ dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
++ dw->dma.device_control = dwc_control;
++
++ dw->dma.device_tx_status = dwc_tx_status;
++ dw->dma.device_issue_pending = dwc_issue_pending;
++
++ dma_writel(dw, CFG, DW_CFG_DMA_EN);
++
++ dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
++ nr_channels);
++
++ dma_async_device_register(&dw->dma);
++
++ if (pdev->dev.of_node) {
++ err = of_dma_controller_register(pdev->dev.of_node,
++ dw_dma_of_xlate, dw);
++ if (err)
++ dev_err(&pdev->dev,
++ "could not register of_dma_controller\n");
++ }
++
++ if (ACPI_HANDLE(&pdev->dev))
++ dw_dma_acpi_controller_register(dw);
++
++ return 0;
++}
++
++static int dw_remove(struct platform_device *pdev)
++{
++ struct dw_dma *dw = platform_get_drvdata(pdev);
++ struct dw_dma_chan *dwc, *_dwc;
++
++ if (pdev->dev.of_node)
++ of_dma_controller_free(pdev->dev.of_node);
++ dw_dma_off(dw);
++ dma_async_device_unregister(&dw->dma);
++
++ tasklet_kill(&dw->tasklet);
++
++ list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
++ chan.device_node) {
++ list_del(&dwc->chan.device_node);
++ channel_clear_bit(dw, CH_EN, dwc->mask);
++ }
++
++ return 0;
++}
++
++static void dw_shutdown(struct platform_device *pdev)
++{
++ struct dw_dma *dw = platform_get_drvdata(pdev);
++
++ dw_dma_off(dw);
++ clk_disable_unprepare(dw->clk);
++}
++
++static int dw_suspend_noirq(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct dw_dma *dw = platform_get_drvdata(pdev);
++
++ dw_dma_off(dw);
++ clk_disable_unprepare(dw->clk);
++
++ return 0;
++}
++
++static int dw_resume_noirq(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct dw_dma *dw = platform_get_drvdata(pdev);
++
++ clk_prepare_enable(dw->clk);
++ dma_writel(dw, CFG, DW_CFG_DMA_EN);
++
++ return 0;
++}
++
++static const struct dev_pm_ops dw_dev_pm_ops = {
++ .suspend_noirq = dw_suspend_noirq,
++ .resume_noirq = dw_resume_noirq,
++ .freeze_noirq = dw_suspend_noirq,
++ .thaw_noirq = dw_resume_noirq,
++ .restore_noirq = dw_resume_noirq,
++ .poweroff_noirq = dw_suspend_noirq,
++};
++
++#ifdef CONFIG_OF
++static const struct of_device_id dw_dma_of_id_table[] = {
++ { .compatible = "snps,dma-spear1340" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
++#endif
++
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id dw_dma_acpi_id_table[] = {
++ { "INTL9C60", 0 },
++ { }
++};
++#endif
++
++static struct platform_driver dw_driver = {
++ .probe = dw_probe,
++ .remove = dw_remove,
++ .shutdown = dw_shutdown,
++ .driver = {
++ .name = "dw_dmac",
++ .pm = &dw_dev_pm_ops,
++ .of_match_table = of_match_ptr(dw_dma_of_id_table),
++ .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
++ },
++};
++
++static int __init dw_init(void)
++{
++ return platform_driver_register(&dw_driver);
++}
++subsys_initcall(dw_init);
++
++static void __exit dw_exit(void)
++{
++ platform_driver_unregister(&dw_driver);
++}
++module_exit(dw_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
++MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
++MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+--- /dev/null
++++ b/drivers/dma/dw/dw_dmac_regs.h
+@@ -0,0 +1,311 @@
++/*
++ * Driver for the Synopsys DesignWare AHB DMA Controller
++ *
++ * Copyright (C) 2005-2007 Atmel Corporation
++ * Copyright (C) 2010-2011 ST Microelectronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dw_dmac.h>
++
++#define DW_DMA_MAX_NR_CHANNELS 8
++#define DW_DMA_MAX_NR_REQUESTS 16
++
++/* flow controller */
++enum dw_dma_fc {
++ DW_DMA_FC_D_M2M,
++ DW_DMA_FC_D_M2P,
++ DW_DMA_FC_D_P2M,
++ DW_DMA_FC_D_P2P,
++ DW_DMA_FC_P_P2M,
++ DW_DMA_FC_SP_P2P,
++ DW_DMA_FC_P_M2P,
++ DW_DMA_FC_DP_P2P,
++};
++
++/*
++ * Redefine this macro to handle differences between 32- and 64-bit
++ * addressing, big vs. little endian, etc.
++ */
++#define DW_REG(name) u32 name; u32 __pad_##name
++
++/* Hardware register definitions. */
++struct dw_dma_chan_regs {
++ DW_REG(SAR); /* Source Address Register */
++ DW_REG(DAR); /* Destination Address Register */
++ DW_REG(LLP); /* Linked List Pointer */
++ u32 CTL_LO; /* Control Register Low */
++ u32 CTL_HI; /* Control Register High */
++ DW_REG(SSTAT);
++ DW_REG(DSTAT);
++ DW_REG(SSTATAR);
++ DW_REG(DSTATAR);
++ u32 CFG_LO; /* Configuration Register Low */
++ u32 CFG_HI; /* Configuration Register High */
++ DW_REG(SGR);
++ DW_REG(DSR);
++};
++
++struct dw_dma_irq_regs {
++ DW_REG(XFER);
++ DW_REG(BLOCK);
++ DW_REG(SRC_TRAN);
++ DW_REG(DST_TRAN);
++ DW_REG(ERROR);
++};
++
++struct dw_dma_regs {
++ /* per-channel registers */
++ struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
++
++ /* irq handling */
++ struct dw_dma_irq_regs RAW; /* r */
++ struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
++ struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
++ struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
++
++ DW_REG(STATUS_INT); /* r */
++
++ /* software handshaking */
++ DW_REG(REQ_SRC);
++ DW_REG(REQ_DST);
++ DW_REG(SGL_REQ_SRC);
++ DW_REG(SGL_REQ_DST);
++ DW_REG(LAST_SRC);
++ DW_REG(LAST_DST);
++
++ /* miscellaneous */
++ DW_REG(CFG);
++ DW_REG(CH_EN);
++ DW_REG(ID);
++ DW_REG(TEST);
++
++ /* reserved */
++ DW_REG(__reserved0);
++ DW_REG(__reserved1);
++
++ /* optional encoded params, 0x3c8..0x3f7 */
++ u32 __reserved;
++
++ /* per-channel configuration registers */
++ u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
++ u32 MULTI_BLK_TYPE;
++ u32 MAX_BLK_SIZE;
++
++ /* top-level parameters */
++ u32 DW_PARAMS;
++};
++
++#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
++#define dma_readl_native ioread32be
++#define dma_writel_native iowrite32be
++#else
++#define dma_readl_native readl
++#define dma_writel_native writel
++#endif
++
++/* To access the registers at an early stage of probe */
++#define dma_read_byaddr(addr, name) \
++ dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
++
++/* Bitfields in DW_PARAMS */
++#define DW_PARAMS_NR_CHAN 8 /* number of channels */
++#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */
++#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n))
++#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */
++#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */
++#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */
++#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */
++#define DW_PARAMS_EN 28 /* encoded parameters */
++
++/* Bitfields in DWC_PARAMS */
++#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */
++
++/* Bitfields in CTL_LO */
++#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
++#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
++#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
++#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
++#define DWC_CTLL_DST_DEC (1<<7)
++#define DWC_CTLL_DST_FIX (2<<7)
++#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */
++#define DWC_CTLL_SRC_DEC (1<<9)
++#define DWC_CTLL_SRC_FIX (2<<9)
++#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
++#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
++#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
++#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
++#define DWC_CTLL_FC(n) ((n) << 20)
++#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
++#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
++#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
++#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
++/* plus 4 transfer types for peripheral-as-flow-controller */
++#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
++#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
++#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
++#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
++
++/* Bitfields in CTL_HI */
++#define DWC_CTLH_DONE 0x00001000
++#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
++
++/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
++#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
++#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
++#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
++#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* fifo drained, pause complete */
++#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
++#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
++#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
++#define DWC_CFGL_RELOAD_SAR (1 << 30)
++#define DWC_CFGL_RELOAD_DAR (1 << 31)
++
++/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
++#define DWC_CFGH_DS_UPD_EN (1 << 5)
++#define DWC_CFGH_SS_UPD_EN (1 << 6)
++
++/* Bitfields in SGR */
++#define DWC_SGR_SGI(x) ((x) << 0)
++#define DWC_SGR_SGC(x) ((x) << 20)
++
++/* Bitfields in DSR */
++#define DWC_DSR_DSI(x) ((x) << 0)
++#define DWC_DSR_DSC(x) ((x) << 20)
++
++/* Bitfields in CFG */
++#define DW_CFG_DMA_EN (1 << 0)
++
++enum dw_dmac_flags {
++ DW_DMA_IS_CYCLIC = 0,
++ DW_DMA_IS_SOFT_LLP = 1,
++};
++
++struct dw_dma_chan {
++ struct dma_chan chan;
++ void __iomem *ch_regs;
++ u8 mask;
++ u8 priority;
++ enum dma_transfer_direction direction;
++ bool paused;
++ bool initialized;
++
++ /* software emulation of the LLP transfers */
++ struct list_head *tx_node_active;
++
++ spinlock_t lock;
++
++ /* these other elements are all protected by lock */
++ unsigned long flags;
++ struct list_head active_list;
++ struct list_head queue;
++ struct list_head free_list;
++ u32 residue;
++ struct dw_cyclic_desc *cdesc;
++
++ unsigned int descs_allocated;
++
++ /* hardware configuration */
++ unsigned int block_size;
++ bool nollp;
++
++ /* custom slave configuration */
++ unsigned int request_line;
++ unsigned char src_master;
++ unsigned char dst_master;
++
++ /* configuration passed via DMA_SLAVE_CONFIG */
++ struct dma_slave_config dma_sconfig;
++};
++
++static inline struct dw_dma_chan_regs __iomem *
++__dwc_regs(struct dw_dma_chan *dwc)
++{
++ return dwc->ch_regs;
++}
++
++#define channel_readl(dwc, name) \
++ dma_readl_native(&(__dwc_regs(dwc)->name))
++#define channel_writel(dwc, name, val) \
++ dma_writel_native((val), &(__dwc_regs(dwc)->name))
++
++static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
++{
++ return container_of(chan, struct dw_dma_chan, chan);
++}
++
++struct dw_dma {
++ struct dma_device dma;
++ void __iomem *regs;
++ struct dma_pool *desc_pool;
++ struct tasklet_struct tasklet;
++ struct clk *clk;
++
++ u8 all_chan_mask;
++
++ /* hardware configuration */
++ unsigned char nr_masters;
++ unsigned char data_width[4];
++
++ struct dw_dma_chan chan[0];
++};
++
++static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
++{
++ return dw->regs;
++}
++
++#define dma_readl(dw, name) \
++ dma_readl_native(&(__dw_regs(dw)->name))
++#define dma_writel(dw, name, val) \
++ dma_writel_native((val), &(__dw_regs(dw)->name))
++
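++/*
++ * The channel enable and interrupt mask registers keep a write-enable mask
++ * in their upper byte: a bit in the low byte only changes when the matching
++ * bit in the high byte is set in the same write, hence the (mask) << 8.
++ */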
++#define channel_set_bit(dw, reg, mask) \
++ dma_writel(dw, reg, ((mask) << 8) | (mask))
++#define channel_clear_bit(dw, reg, mask) \
++ dma_writel(dw, reg, ((mask) << 8) | 0)
++
++static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
++{
++ return container_of(ddev, struct dw_dma, dma);
++}
++
++/* LLI == Linked List Item; a.k.a. DMA block descriptor */
++struct dw_lli {
++ /* values that are not changed by hardware */
++ u32 sar;
++ u32 dar;
++ u32 llp; /* chain to next lli */
++ u32 ctllo;
++ /* values that may get written back: */
++ u32 ctlhi;
++ /* sstat and dstat can snapshot peripheral register state.
++ * silicon config may discard either or both...
++ */
++ u32 sstat;
++ u32 dstat;
++};
++
++struct dw_desc {
++ /* FIRST values the hardware uses */
++ struct dw_lli lli;
++
++ /* THEN values for driver housekeeping */
++ struct list_head desc_node;
++ struct list_head tx_list;
++ struct dma_async_tx_descriptor txd;
++ size_t len;
++ size_t total_len;
++};
++
++#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node)
++
++static inline struct dw_desc *
++txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
++{
++ return container_of(txd, struct dw_desc, txd);
++}
+--- a/drivers/dma/dw_dmac.c
++++ /dev/null
+@@ -1,1969 +0,0 @@
+-/*
+- * Core driver for the Synopsys DesignWare DMA Controller
+- *
+- * Copyright (C) 2007-2008 Atmel Corporation
+- * Copyright (C) 2010-2011 ST Microelectronics
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/bitops.h>
+-#include <linux/clk.h>
+-#include <linux/delay.h>
+-#include <linux/dmaengine.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/dmapool.h>
+-#include <linux/err.h>
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+-#include <linux/io.h>
+-#include <linux/of.h>
+-#include <linux/of_dma.h>
+-#include <linux/mm.h>
+-#include <linux/module.h>
+-#include <linux/platform_device.h>
+-#include <linux/slab.h>
+-#include <linux/acpi.h>
+-#include <linux/acpi_dma.h>
+-
+-#include "dw_dmac_regs.h"
+-#include "dmaengine.h"
+-
+-/*
+- * This supports the Synopsys "DesignWare AHB Central DMA Controller",
+- * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
+- * of which use ARM any more). See the "Databook" from Synopsys for
+- * information beyond what licensees probably provide.
+- *
+- * The driver has currently been tested only with the Atmel AT32AP7000,
+- * which does not support descriptor writeback.
+- */
+-
+-static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
+-{
+- return slave ? slave->dst_master : 0;
+-}
+-
+-static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
+-{
+- return slave ? slave->src_master : 1;
+-}
+-
+-static inline void dwc_set_masters(struct dw_dma_chan *dwc)
+-{
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- struct dw_dma_slave *dws = dwc->chan.private;
+- unsigned char mmax = dw->nr_masters - 1;
+-
+- if (dwc->request_line == ~0) {
+- dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+- dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
+- }
+-}
+-
+-#define DWC_DEFAULT_CTLLO(_chan) ({ \
+- struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
+- struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
+- bool _is_slave = is_slave_direction(_dwc->direction); \
+- u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
+- DW_DMA_MSIZE_16; \
+- u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
+- DW_DMA_MSIZE_16; \
+- \
+- (DWC_CTLL_DST_MSIZE(_dmsize) \
+- | DWC_CTLL_SRC_MSIZE(_smsize) \
+- | DWC_CTLL_LLP_D_EN \
+- | DWC_CTLL_LLP_S_EN \
+- | DWC_CTLL_DMS(_dwc->dst_master) \
+- | DWC_CTLL_SMS(_dwc->src_master)); \
+- })
+-
+-/*
+- * Number of descriptors to allocate for each channel. This should be
+- * made configurable somehow; preferably, the clients (at least the
+- * ones using slave transfers) should be able to give us a hint.
+- */
+-#define NR_DESCS_PER_CHANNEL 64
+-
+-/*----------------------------------------------------------------------*/
+-
+-static struct device *chan2dev(struct dma_chan *chan)
+-{
+- return &chan->dev->device;
+-}
+-static struct device *chan2parent(struct dma_chan *chan)
+-{
+- return chan->dev->device.parent;
+-}
+-
+-static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
+-{
+- return to_dw_desc(dwc->active_list.next);
+-}
+-
+-static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+-{
+- struct dw_desc *desc, *_desc;
+- struct dw_desc *ret = NULL;
+- unsigned int i = 0;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
+- i++;
+- if (async_tx_test_ack(&desc->txd)) {
+- list_del(&desc->desc_node);
+- ret = desc;
+- break;
+- }
+- dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
+- }
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
+-
+- return ret;
+-}
+-
+-/*
+- * Move a descriptor, including any children, to the free list.
+- * `desc' must not be on any lists.
+- */
+-static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+-{
+- unsigned long flags;
+-
+- if (desc) {
+- struct dw_desc *child;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- list_for_each_entry(child, &desc->tx_list, desc_node)
+- dev_vdbg(chan2dev(&dwc->chan),
+- "moving child desc %p to freelist\n",
+- child);
+- list_splice_init(&desc->tx_list, &dwc->free_list);
+- dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
+- list_add(&desc->desc_node, &dwc->free_list);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- }
+-}
+-
+-static void dwc_initialize(struct dw_dma_chan *dwc)
+-{
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- struct dw_dma_slave *dws = dwc->chan.private;
+- u32 cfghi = DWC_CFGH_FIFO_MODE;
+- u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+-
+- if (dwc->initialized == true)
+- return;
+-
+- if (dws) {
+- /*
+- * We need controller-specific data to set up slave
+- * transfers.
+- */
+- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+- cfghi = dws->cfg_hi;
+- cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+- } else {
+- if (dwc->direction == DMA_MEM_TO_DEV)
+- cfghi = DWC_CFGH_DST_PER(dwc->request_line);
+- else if (dwc->direction == DMA_DEV_TO_MEM)
+- cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+- }
+-
+- channel_writel(dwc, CFG_LO, cfglo);
+- channel_writel(dwc, CFG_HI, cfghi);
+-
+- /* Enable interrupts */
+- channel_set_bit(dw, MASK.XFER, dwc->mask);
+- channel_set_bit(dw, MASK.ERROR, dwc->mask);
+-
+- dwc->initialized = true;
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-static inline unsigned int dwc_fast_fls(unsigned long long v)
+-{
+- /*
+- * We can be a lot more clever here, but this should take care
+- * of the most common optimization.
+- */
+- if (!(v & 7))
+- return 3;
+- else if (!(v & 3))
+- return 2;
+- else if (!(v & 1))
+- return 1;
+- return 0;
+-}
+-
+-static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
+-{
+- dev_err(chan2dev(&dwc->chan),
+- " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+- channel_readl(dwc, SAR),
+- channel_readl(dwc, DAR),
+- channel_readl(dwc, LLP),
+- channel_readl(dwc, CTL_HI),
+- channel_readl(dwc, CTL_LO));
+-}
+-
+-static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
+-{
+- channel_clear_bit(dw, CH_EN, dwc->mask);
+- while (dma_readl(dw, CH_EN) & dwc->mask)
+- cpu_relax();
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-/* Perform single block transfer */
+-static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
+- struct dw_desc *desc)
+-{
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- u32 ctllo;
+-
+- /* Software emulation of LLP mode relies on interrupts to continue
+- * multi block transfer. */
+- ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+-
+- channel_writel(dwc, SAR, desc->lli.sar);
+- channel_writel(dwc, DAR, desc->lli.dar);
+- channel_writel(dwc, CTL_LO, ctllo);
+- channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+- channel_set_bit(dw, CH_EN, dwc->mask);
+-
+- /* Move pointer to next descriptor */
+- dwc->tx_node_active = dwc->tx_node_active->next;
+-}
+-
+-/* Called with dwc->lock held and bh disabled */
+-static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
+-{
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- unsigned long was_soft_llp;
+-
+- /* ASSERT: channel is idle */
+- if (dma_readl(dw, CH_EN) & dwc->mask) {
+- dev_err(chan2dev(&dwc->chan),
+- "BUG: Attempted to start non-idle channel\n");
+- dwc_dump_chan_regs(dwc);
+-
+- /* The tasklet will hopefully advance the queue... */
+- return;
+- }
+-
+- if (dwc->nollp) {
+- was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
+- &dwc->flags);
+- if (was_soft_llp) {
+- dev_err(chan2dev(&dwc->chan),
+- "BUG: Attempted to start new LLP transfer "
+- "inside ongoing one\n");
+- return;
+- }
+-
+- dwc_initialize(dwc);
+-
+- dwc->residue = first->total_len;
+- dwc->tx_node_active = &first->tx_list;
+-
+- /* Submit first block */
+- dwc_do_single_block(dwc, first);
+-
+- return;
+- }
+-
+- dwc_initialize(dwc);
+-
+- channel_writel(dwc, LLP, first->txd.phys);
+- channel_writel(dwc, CTL_LO,
+- DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+- channel_writel(dwc, CTL_HI, 0);
+- channel_set_bit(dw, CH_EN, dwc->mask);
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-static void
+-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+- bool callback_required)
+-{
+- dma_async_tx_callback callback = NULL;
+- void *param = NULL;
+- struct dma_async_tx_descriptor *txd = &desc->txd;
+- struct dw_desc *child;
+- unsigned long flags;
+-
+- dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- dma_cookie_complete(txd);
+- if (callback_required) {
+- callback = txd->callback;
+- param = txd->callback_param;
+- }
+-
+- /* async_tx_ack */
+- list_for_each_entry(child, &desc->tx_list, desc_node)
+- async_tx_ack(&child->txd);
+- async_tx_ack(&desc->txd);
+-
+- list_splice_init(&desc->tx_list, &dwc->free_list);
+- list_move(&desc->desc_node, &dwc->free_list);
+-
+- if (!is_slave_direction(dwc->direction)) {
+- struct device *parent = chan2parent(&dwc->chan);
+- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+- dma_unmap_single(parent, desc->lli.dar,
+- desc->total_len, DMA_FROM_DEVICE);
+- else
+- dma_unmap_page(parent, desc->lli.dar,
+- desc->total_len, DMA_FROM_DEVICE);
+- }
+- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+- dma_unmap_single(parent, desc->lli.sar,
+- desc->total_len, DMA_TO_DEVICE);
+- else
+- dma_unmap_page(parent, desc->lli.sar,
+- desc->total_len, DMA_TO_DEVICE);
+- }
+- }
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- if (callback)
+- callback(param);
+-}
+-
+-static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
+-{
+- struct dw_desc *desc, *_desc;
+- LIST_HEAD(list);
+- unsigned long flags;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- if (dma_readl(dw, CH_EN) & dwc->mask) {
+- dev_err(chan2dev(&dwc->chan),
+- "BUG: XFER bit set, but channel not idle!\n");
+-
+- /* Try to continue after resetting the channel... */
+- dwc_chan_disable(dw, dwc);
+- }
+-
+- /*
+- * Submit queued descriptors ASAP, i.e. before we go through
+- * the completed ones.
+- */
+- list_splice_init(&dwc->active_list, &list);
+- if (!list_empty(&dwc->queue)) {
+- list_move(dwc->queue.next, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- }
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- list_for_each_entry_safe(desc, _desc, &list, desc_node)
+- dwc_descriptor_complete(dwc, desc, true);
+-}
+-
+-/* Returns how many bytes were already received from source */
+-static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+-{
+- u32 ctlhi = channel_readl(dwc, CTL_HI);
+- u32 ctllo = channel_readl(dwc, CTL_LO);
+-
+- return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+-}
+-
+-static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
+-{
+- dma_addr_t llp;
+- struct dw_desc *desc, *_desc;
+- struct dw_desc *child;
+- u32 status_xfer;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- llp = channel_readl(dwc, LLP);
+- status_xfer = dma_readl(dw, RAW.XFER);
+-
+- if (status_xfer & dwc->mask) {
+- /* Everything we've submitted is done */
+- dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+- if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+- struct list_head *head, *active = dwc->tx_node_active;
+-
+- /*
+- * We are inside first active descriptor.
+- * Otherwise something is really wrong.
+- */
+- desc = dwc_first_active(dwc);
+-
+- head = &desc->tx_list;
+- if (active != head) {
+- /* Update desc to reflect last sent one */
+- if (active != head->next)
+- desc = to_dw_desc(active->prev);
+-
+- dwc->residue -= desc->len;
+-
+- child = to_dw_desc(active);
+-
+- /* Submit next block */
+- dwc_do_single_block(dwc, child);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return;
+- }
+-
+- /* We are done here */
+- clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+- }
+-
+- dwc->residue = 0;
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- dwc_complete_all(dw, dwc);
+- return;
+- }
+-
+- if (list_empty(&dwc->active_list)) {
+- dwc->residue = 0;
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return;
+- }
+-
+- if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+- dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return;
+- }
+-
+- dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
+- (unsigned long long)llp);
+-
+- list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+- /* Initial residue value */
+- dwc->residue = desc->total_len;
+-
+- /* Check first descriptors addr */
+- if (desc->txd.phys == llp) {
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return;
+- }
+-
+- /* Check first descriptors llp */
+- if (desc->lli.llp == llp) {
+- /* This one is currently in progress */
+- dwc->residue -= dwc_get_sent(dwc);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return;
+- }
+-
+- dwc->residue -= desc->len;
+- list_for_each_entry(child, &desc->tx_list, desc_node) {
+- if (child->lli.llp == llp) {
+- /* Currently in progress */
+- dwc->residue -= dwc_get_sent(dwc);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return;
+- }
+- dwc->residue -= child->len;
+- }
+-
+- /*
+- * No descriptors so far seem to be in progress, i.e.
+- * this one must be done.
+- */
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- dwc_descriptor_complete(dwc, desc, true);
+- spin_lock_irqsave(&dwc->lock, flags);
+- }
+-
+- dev_err(chan2dev(&dwc->chan),
+- "BUG: All descriptors done, but channel not idle!\n");
+-
+- /* Try to continue after resetting the channel... */
+- dwc_chan_disable(dw, dwc);
+-
+- if (!list_empty(&dwc->queue)) {
+- list_move(dwc->queue.next, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- }
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-}
+-
+-static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+-{
+- dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+- lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+-}
+-
+-static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
+-{
+- struct dw_desc *bad_desc;
+- struct dw_desc *child;
+- unsigned long flags;
+-
+- dwc_scan_descriptors(dw, dwc);
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- /*
+- * The descriptor currently at the head of the active list is
+- * borked. Since we don't have any way to report errors, we'll
+- * just have to scream loudly and try to carry on.
+- */
+- bad_desc = dwc_first_active(dwc);
+- list_del_init(&bad_desc->desc_node);
+- list_move(dwc->queue.next, dwc->active_list.prev);
+-
+- /* Clear the error flag and try to restart the controller */
+- dma_writel(dw, CLEAR.ERROR, dwc->mask);
+- if (!list_empty(&dwc->active_list))
+- dwc_dostart(dwc, dwc_first_active(dwc));
+-
+- /*
+- * WARN may seem harsh, but since this only happens
+- * when someone submits a bad physical address in a
+- * descriptor, we should consider ourselves lucky that the
+- * controller flagged an error instead of scribbling over
+- * random memory locations.
+- */
+- dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+- " cookie: %d\n", bad_desc->txd.cookie);
+- dwc_dump_lli(dwc, &bad_desc->lli);
+- list_for_each_entry(child, &bad_desc->tx_list, desc_node)
+- dwc_dump_lli(dwc, &child->lli);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- /* Pretend the descriptor completed successfully */
+- dwc_descriptor_complete(dwc, bad_desc, true);
+-}
+-
+-/* --------------------- Cyclic DMA API extensions -------------------- */
+-
+-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- return channel_readl(dwc, SAR);
+-}
+-EXPORT_SYMBOL(dw_dma_get_src_addr);
+-
+-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- return channel_readl(dwc, DAR);
+-}
+-EXPORT_SYMBOL(dw_dma_get_dst_addr);
+-
+-/* Called with dwc->lock held and all DMAC interrupts disabled */
+-static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+- u32 status_err, u32 status_xfer)
+-{
+- unsigned long flags;
+-
+- if (dwc->mask) {
+- void (*callback)(void *param);
+- void *callback_param;
+-
+- dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+- channel_readl(dwc, LLP));
+-
+- callback = dwc->cdesc->period_callback;
+- callback_param = dwc->cdesc->period_callback_param;
+-
+- if (callback)
+- callback(callback_param);
+- }
+-
+- /*
+- * Error and transfer complete are highly unlikely, and will most
+- * likely be due to a configuration error by the user.
+- */
+- if (unlikely(status_err & dwc->mask) ||
+- unlikely(status_xfer & dwc->mask)) {
+- int i;
+-
+- dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+- "interrupt, stopping DMA transfer\n",
+- status_xfer ? "xfer" : "error");
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- dwc_dump_chan_regs(dwc);
+-
+- dwc_chan_disable(dw, dwc);
+-
+- /* Make sure DMA does not restart by loading a new list */
+- channel_writel(dwc, LLP, 0);
+- channel_writel(dwc, CTL_LO, 0);
+- channel_writel(dwc, CTL_HI, 0);
+-
+- dma_writel(dw, CLEAR.ERROR, dwc->mask);
+- dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+- for (i = 0; i < dwc->cdesc->periods; i++)
+- dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- }
+-}
+-
+-/* ------------------------------------------------------------------------- */
+-
+-static void dw_dma_tasklet(unsigned long data)
+-{
+- struct dw_dma *dw = (struct dw_dma *)data;
+- struct dw_dma_chan *dwc;
+- u32 status_xfer;
+- u32 status_err;
+- int i;
+-
+- status_xfer = dma_readl(dw, RAW.XFER);
+- status_err = dma_readl(dw, RAW.ERROR);
+-
+- dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
+-
+- for (i = 0; i < dw->dma.chancnt; i++) {
+- dwc = &dw->chan[i];
+- if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+- dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+- else if (status_err & (1 << i))
+- dwc_handle_error(dw, dwc);
+- else if (status_xfer & (1 << i))
+- dwc_scan_descriptors(dw, dwc);
+- }
+-
+- /*
+- * Re-enable interrupts.
+- */
+- channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+- channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+-}
+-
+-static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+-{
+- struct dw_dma *dw = dev_id;
+- u32 status;
+-
+- dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
+- dma_readl(dw, STATUS_INT));
+-
+- /*
+- * Just disable the interrupts. We'll turn them back on in the
+- * softirq handler.
+- */
+- channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+- channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+-
+- status = dma_readl(dw, STATUS_INT);
+- if (status) {
+- dev_err(dw->dma.dev,
+- "BUG: Unexpected interrupts pending: 0x%x\n",
+- status);
+-
+- /* Try to recover */
+- channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+- channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+- channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+- channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+- }
+-
+- tasklet_schedule(&dw->tasklet);
+-
+- return IRQ_HANDLED;
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
+-{
+- struct dw_desc *desc = txd_to_dw_desc(tx);
+- struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
+- dma_cookie_t cookie;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- cookie = dma_cookie_assign(tx);
+-
+- /*
+- * REVISIT: We should attempt to chain as many descriptors as
+- * possible, perhaps even appending to those already submitted
+- * for DMA. But this is hard to do in a race-free manner.
+- */
+- if (list_empty(&dwc->active_list)) {
+- dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
+- desc->txd.cookie);
+- list_add_tail(&desc->desc_node, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- } else {
+- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
+- desc->txd.cookie);
+-
+- list_add_tail(&desc->desc_node, &dwc->queue);
+- }
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- return cookie;
+-}
+-
+-static struct dma_async_tx_descriptor *
+-dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+- size_t len, unsigned long flags)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(chan->device);
+- struct dw_desc *desc;
+- struct dw_desc *first;
+- struct dw_desc *prev;
+- size_t xfer_count;
+- size_t offset;
+- unsigned int src_width;
+- unsigned int dst_width;
+- unsigned int data_width;
+- u32 ctllo;
+-
+- dev_vdbg(chan2dev(chan),
+- "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
+- (unsigned long long)dest, (unsigned long long)src,
+- len, flags);
+-
+- if (unlikely(!len)) {
+- dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+- return NULL;
+- }
+-
+- dwc->direction = DMA_MEM_TO_MEM;
+-
+- data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
+- dw->data_width[dwc->dst_master]);
+-
+- src_width = dst_width = min_t(unsigned int, data_width,
+- dwc_fast_fls(src | dest | len));
+-
+- ctllo = DWC_DEFAULT_CTLLO(chan)
+- | DWC_CTLL_DST_WIDTH(dst_width)
+- | DWC_CTLL_SRC_WIDTH(src_width)
+- | DWC_CTLL_DST_INC
+- | DWC_CTLL_SRC_INC
+- | DWC_CTLL_FC_M2M;
+- prev = first = NULL;
+-
+- for (offset = 0; offset < len; offset += xfer_count << src_width) {
+- xfer_count = min_t(size_t, (len - offset) >> src_width,
+- dwc->block_size);
+-
+- desc = dwc_desc_get(dwc);
+- if (!desc)
+- goto err_desc_get;
+-
+- desc->lli.sar = src + offset;
+- desc->lli.dar = dest + offset;
+- desc->lli.ctllo = ctllo;
+- desc->lli.ctlhi = xfer_count;
+- desc->len = xfer_count << src_width;
+-
+- if (!first) {
+- first = desc;
+- } else {
+- prev->lli.llp = desc->txd.phys;
+- list_add_tail(&desc->desc_node,
+- &first->tx_list);
+- }
+- prev = desc;
+- }
+-
+- if (flags & DMA_PREP_INTERRUPT)
+- /* Trigger interrupt after last block */
+- prev->lli.ctllo |= DWC_CTLL_INT_EN;
+-
+- prev->lli.llp = 0;
+- first->txd.flags = flags;
+- first->total_len = len;
+-
+- return &first->txd;
+-
+-err_desc_get:
+- dwc_desc_put(dwc, first);
+- return NULL;
+-}
+-
+-static struct dma_async_tx_descriptor *
+-dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+- unsigned int sg_len, enum dma_transfer_direction direction,
+- unsigned long flags, void *context)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(chan->device);
+- struct dma_slave_config *sconfig = &dwc->dma_sconfig;
+- struct dw_desc *prev;
+- struct dw_desc *first;
+- u32 ctllo;
+- dma_addr_t reg;
+- unsigned int reg_width;
+- unsigned int mem_width;
+- unsigned int data_width;
+- unsigned int i;
+- struct scatterlist *sg;
+- size_t total_len = 0;
+-
+- dev_vdbg(chan2dev(chan), "%s\n", __func__);
+-
+- if (unlikely(!is_slave_direction(direction) || !sg_len))
+- return NULL;
+-
+- dwc->direction = direction;
+-
+- prev = first = NULL;
+-
+- switch (direction) {
+- case DMA_MEM_TO_DEV:
+- reg_width = __fls(sconfig->dst_addr_width);
+- reg = sconfig->dst_addr;
+- ctllo = (DWC_DEFAULT_CTLLO(chan)
+- | DWC_CTLL_DST_WIDTH(reg_width)
+- | DWC_CTLL_DST_FIX
+- | DWC_CTLL_SRC_INC);
+-
+- ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+- DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+-
+- data_width = dw->data_width[dwc->src_master];
+-
+- for_each_sg(sgl, sg, sg_len, i) {
+- struct dw_desc *desc;
+- u32 len, dlen, mem;
+-
+- mem = sg_dma_address(sg);
+- len = sg_dma_len(sg);
+-
+- mem_width = min_t(unsigned int,
+- data_width, dwc_fast_fls(mem | len));
+-
+-slave_sg_todev_fill_desc:
+- desc = dwc_desc_get(dwc);
+- if (!desc) {
+- dev_err(chan2dev(chan),
+- "not enough descriptors available\n");
+- goto err_desc_get;
+- }
+-
+- desc->lli.sar = mem;
+- desc->lli.dar = reg;
+- desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+- if ((len >> mem_width) > dwc->block_size) {
+- dlen = dwc->block_size << mem_width;
+- mem += dlen;
+- len -= dlen;
+- } else {
+- dlen = len;
+- len = 0;
+- }
+-
+- desc->lli.ctlhi = dlen >> mem_width;
+- desc->len = dlen;
+-
+- if (!first) {
+- first = desc;
+- } else {
+- prev->lli.llp = desc->txd.phys;
+- list_add_tail(&desc->desc_node,
+- &first->tx_list);
+- }
+- prev = desc;
+- total_len += dlen;
+-
+- if (len)
+- goto slave_sg_todev_fill_desc;
+- }
+- break;
+- case DMA_DEV_TO_MEM:
+- reg_width = __fls(sconfig->src_addr_width);
+- reg = sconfig->src_addr;
+- ctllo = (DWC_DEFAULT_CTLLO(chan)
+- | DWC_CTLL_SRC_WIDTH(reg_width)
+- | DWC_CTLL_DST_INC
+- | DWC_CTLL_SRC_FIX);
+-
+- ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+- DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+-
+- data_width = dw->data_width[dwc->dst_master];
+-
+- for_each_sg(sgl, sg, sg_len, i) {
+- struct dw_desc *desc;
+- u32 len, dlen, mem;
+-
+- mem = sg_dma_address(sg);
+- len = sg_dma_len(sg);
+-
+- mem_width = min_t(unsigned int,
+- data_width, dwc_fast_fls(mem | len));
+-
+-slave_sg_fromdev_fill_desc:
+- desc = dwc_desc_get(dwc);
+- if (!desc) {
+- dev_err(chan2dev(chan),
+- "not enough descriptors available\n");
+- goto err_desc_get;
+- }
+-
+- desc->lli.sar = reg;
+- desc->lli.dar = mem;
+- desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+- if ((len >> reg_width) > dwc->block_size) {
+- dlen = dwc->block_size << reg_width;
+- mem += dlen;
+- len -= dlen;
+- } else {
+- dlen = len;
+- len = 0;
+- }
+- desc->lli.ctlhi = dlen >> reg_width;
+- desc->len = dlen;
+-
+- if (!first) {
+- first = desc;
+- } else {
+- prev->lli.llp = desc->txd.phys;
+- list_add_tail(&desc->desc_node,
+- &first->tx_list);
+- }
+- prev = desc;
+- total_len += dlen;
+-
+- if (len)
+- goto slave_sg_fromdev_fill_desc;
+- }
+- break;
+- default:
+- return NULL;
+- }
+-
+- if (flags & DMA_PREP_INTERRUPT)
+- /* Trigger interrupt after last block */
+- prev->lli.ctllo |= DWC_CTLL_INT_EN;
+-
+- prev->lli.llp = 0;
+- first->total_len = total_len;
+-
+- return &first->txd;
+-
+-err_desc_get:
+- dwc_desc_put(dwc, first);
+- return NULL;
+-}
+-
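The slave path above is driven the same way: the peripheral driver first hands in a struct dma_slave_config (routed to set_runtime_config() further down as DMA_SLAVE_CONFIG) and then asks for a descriptor over its scatterlist. A hedged sketch; client_tx_setup, sgl, sg_len and dev_fifo_addr are client-side placeholders:

	static int client_tx_setup(struct dma_chan *chan, struct scatterlist *sgl,
				   unsigned int sg_len, dma_addr_t dev_fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= dev_fifo_addr,	/* peripheral FIFO address */
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 8,			/* encoded by convert_burst() below */
		};
		struct dma_async_tx_descriptor *txd;
		int ret;

		ret = dmaengine_slave_config(chan, &cfg);	/* DMA_SLAVE_CONFIG */
		if (ret)
			return ret;

		txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
					      DMA_PREP_INTERRUPT);
		if (!txd)
			return -ENOMEM;

		dmaengine_submit(txd);
		dma_async_issue_pending(chan);
		return 0;
	}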
+-/*
+- * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
+- * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+- *
+- * NOTE: burst size 2 is not supported by the controller.
+- *
+- * This is done by finding the most significant bit set: fls(n) - 2 for n > 1,
+- * and 0 otherwise.
+- */
+-static inline void convert_burst(u32 *maxburst)
+-{
+- if (*maxburst > 1)
+- *maxburst = fls(*maxburst) - 2;
+- else
+- *maxburst = 0;
+-}
+-
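Worked through the mapping above: fls() returns the position of the most significant set bit (fls(1) = 1, fls(4) = 3, fls(8) = 4, fls(16) = 5), so the subtraction yields exactly the encoding the hardware expects. Illustration only, using throwaway locals:

	u32 b1 = 1, b4 = 4, b8 = 8, b16 = 16;

	convert_burst(&b1);	/* -> 0 (special-cased)          */
	convert_burst(&b4);	/* -> fls(4)  - 2 = 3 - 2 = 1    */
	convert_burst(&b8);	/* -> fls(8)  - 2 = 4 - 2 = 2    */
	convert_burst(&b16);	/* -> fls(16) - 2 = 5 - 2 = 3    */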
+-static int
+-set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+-
+- /* Check if chan will be configured for slave transfers */
+- if (!is_slave_direction(sconfig->direction))
+- return -EINVAL;
+-
+- memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+- dwc->direction = sconfig->direction;
+-
+- /* Take the request line from slave_id member */
+- if (dwc->request_line == ~0)
+- dwc->request_line = sconfig->slave_id;
+-
+- convert_burst(&dwc->dma_sconfig.src_maxburst);
+- convert_burst(&dwc->dma_sconfig.dst_maxburst);
+-
+- return 0;
+-}
+-
+-static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+-{
+- u32 cfglo = channel_readl(dwc, CFG_LO);
+- unsigned int count = 20; /* timeout iterations */
+-
+- channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+- while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
+- udelay(2);
+-
+- dwc->paused = true;
+-}
+-
+-static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+-{
+- u32 cfglo = channel_readl(dwc, CFG_LO);
+-
+- channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+-
+- dwc->paused = false;
+-}
+-
+-static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+- unsigned long arg)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(chan->device);
+- struct dw_desc *desc, *_desc;
+- unsigned long flags;
+- LIST_HEAD(list);
+-
+- if (cmd == DMA_PAUSE) {
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- dwc_chan_pause(dwc);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- } else if (cmd == DMA_RESUME) {
+- if (!dwc->paused)
+- return 0;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- dwc_chan_resume(dwc);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- } else if (cmd == DMA_TERMINATE_ALL) {
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+-
+- dwc_chan_disable(dw, dwc);
+-
+- dwc_chan_resume(dwc);
+-
+- /* active_list entries will end up before queued entries */
+- list_splice_init(&dwc->queue, &list);
+- list_splice_init(&dwc->active_list, &list);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- /* Flush all pending and queued descriptors */
+- list_for_each_entry_safe(desc, _desc, &list, desc_node)
+- dwc_descriptor_complete(dwc, desc, false);
+- } else if (cmd == DMA_SLAVE_CONFIG) {
+- return set_runtime_config(chan, (struct dma_slave_config *)arg);
+- } else {
+- return -ENXIO;
+- }
+-
+- return 0;
+-}
+-
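Clients exercise the four commands handled by dwc_control() through the standard dmaengine wrappers, which simply forward to device_control() on the channel. For example (chan and cfg are client-side placeholders):

	dmaengine_slave_config(chan, &cfg);	/* DMA_SLAVE_CONFIG -> set_runtime_config()     */
	dmaengine_pause(chan);			/* DMA_PAUSE: set CH_SUSP, wait for FIFO empty  */
	dmaengine_resume(chan);			/* DMA_RESUME: clear CH_SUSP                    */
	dmaengine_terminate_all(chan);		/* DMA_TERMINATE_ALL: disable, flush both lists */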
+-static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+-{
+- unsigned long flags;
+- u32 residue;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- residue = dwc->residue;
+- if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+- residue -= dwc_get_sent(dwc);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return residue;
+-}
+-
+-static enum dma_status
+-dwc_tx_status(struct dma_chan *chan,
+- dma_cookie_t cookie,
+- struct dma_tx_state *txstate)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- enum dma_status ret;
+-
+- ret = dma_cookie_status(chan, cookie, txstate);
+- if (ret != DMA_SUCCESS) {
+- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+-
+- ret = dma_cookie_status(chan, cookie, txstate);
+- }
+-
+- if (ret != DMA_SUCCESS)
+- dma_set_residue(txstate, dwc_get_residue(dwc));
+-
+- if (dwc->paused)
+- return DMA_PAUSED;
+-
+- return ret;
+-}
+-
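Progress is normally queried with the generic status helper, which lands in dwc_tx_status() above; the residue it reports is only meaningful while the transfer is still in flight. A minimal sketch, with chan, cookie and dev as client-side placeholders:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_PAUSED)
		dev_dbg(dev, "paused, %u bytes left\n", state.residue);
	else if (status != DMA_SUCCESS)
		dev_dbg(dev, "in flight, residue %u\n", state.residue);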
+-static void dwc_issue_pending(struct dma_chan *chan)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+-
+- if (!list_empty(&dwc->queue))
+- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+-}
+-
+-static int dwc_alloc_chan_resources(struct dma_chan *chan)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(chan->device);
+- struct dw_desc *desc;
+- int i;
+- unsigned long flags;
+-
+- dev_vdbg(chan2dev(chan), "%s\n", __func__);
+-
+- /* ASSERT: channel is idle */
+- if (dma_readl(dw, CH_EN) & dwc->mask) {
+- dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
+- return -EIO;
+- }
+-
+- dma_cookie_init(chan);
+-
+- /*
+- * NOTE: some controllers may have additional features that we
+- * need to initialize here, like "scatter-gather" (which
+- * doesn't mean what you think it means), and status writeback.
+- */
+-
+- dwc_set_masters(dwc);
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- i = dwc->descs_allocated;
+- while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+- dma_addr_t phys;
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
+- if (!desc)
+- goto err_desc_alloc;
+-
+- memset(desc, 0, sizeof(struct dw_desc));
+-
+- INIT_LIST_HEAD(&desc->tx_list);
+- dma_async_tx_descriptor_init(&desc->txd, chan);
+- desc->txd.tx_submit = dwc_tx_submit;
+- desc->txd.flags = DMA_CTRL_ACK;
+- desc->txd.phys = phys;
+-
+- dwc_desc_put(dwc, desc);
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- i = ++dwc->descs_allocated;
+- }
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+-
+- return i;
+-
+-err_desc_alloc:
+- dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+-
+- return i;
+-}
+-
+-static void dwc_free_chan_resources(struct dma_chan *chan)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(chan->device);
+- struct dw_desc *desc, *_desc;
+- unsigned long flags;
+- LIST_HEAD(list);
+-
+- dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
+- dwc->descs_allocated);
+-
+- /* ASSERT: channel is idle */
+- BUG_ON(!list_empty(&dwc->active_list));
+- BUG_ON(!list_empty(&dwc->queue));
+- BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- list_splice_init(&dwc->free_list, &list);
+- dwc->descs_allocated = 0;
+- dwc->initialized = false;
+- dwc->request_line = ~0;
+-
+- /* Disable interrupts */
+- channel_clear_bit(dw, MASK.XFER, dwc->mask);
+- channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+- dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
+- dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
+- }
+-
+- dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-struct dw_dma_of_filter_args {
+- struct dw_dma *dw;
+- unsigned int req;
+- unsigned int src;
+- unsigned int dst;
+-};
+-
+-static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma_of_filter_args *fargs = param;
+-
+- /* Ensure the device matches our channel */
+- if (chan->device != &fargs->dw->dma)
+- return false;
+-
+- dwc->request_line = fargs->req;
+- dwc->src_master = fargs->src;
+- dwc->dst_master = fargs->dst;
+-
+- return true;
+-}
+-
+-static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+- struct of_dma *ofdma)
+-{
+- struct dw_dma *dw = ofdma->of_dma_data;
+- struct dw_dma_of_filter_args fargs = {
+- .dw = dw,
+- };
+- dma_cap_mask_t cap;
+-
+- if (dma_spec->args_count != 3)
+- return NULL;
+-
+- fargs.req = dma_spec->args[0];
+- fargs.src = dma_spec->args[1];
+- fargs.dst = dma_spec->args[2];
+-
+- if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+- fargs.src >= dw->nr_masters ||
+- fargs.dst >= dw->nr_masters))
+- return NULL;
+-
+- dma_cap_zero(cap);
+- dma_cap_set(DMA_SLAVE, cap);
+-
+- /* TODO: there should be a simpler way to do this */
+- return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+-}
+-
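On DT platforms the three cells decoded here (request line, source master, destination master) come from the client node's dmas property; the client driver itself only asks for a named channel and the lookup funnels through dw_dma_of_xlate(). A sketch, assuming the client node carries a matching dma-names entry called "tx" and pdev is the client's platform device:

	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "tx");	/* resolved via dw_dma_of_xlate() */
	if (!chan)
		dev_err(&pdev->dev, "no DMA channel\n");	/* controller not ready or no such entry */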
+-#ifdef CONFIG_ACPI
+-static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct acpi_dma_spec *dma_spec = param;
+-
+- if (chan->device->dev != dma_spec->dev ||
+- chan->chan_id != dma_spec->chan_id)
+- return false;
+-
+- dwc->request_line = dma_spec->slave_id;
+- dwc->src_master = dwc_get_sms(NULL);
+- dwc->dst_master = dwc_get_dms(NULL);
+-
+- return true;
+-}
+-
+-static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+-{
+- struct device *dev = dw->dma.dev;
+- struct acpi_dma_filter_info *info;
+- int ret;
+-
+- info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+- if (!info)
+- return;
+-
+- dma_cap_zero(info->dma_cap);
+- dma_cap_set(DMA_SLAVE, info->dma_cap);
+- info->filter_fn = dw_dma_acpi_filter;
+-
+- ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+- info);
+- if (ret)
+- dev_err(dev, "could not register acpi_dma_controller\n");
+-}
+-#else /* !CONFIG_ACPI */
+-static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+-#endif /* !CONFIG_ACPI */
+-
+-/* --------------------- Cyclic DMA API extensions -------------------- */
+-
+-/**
+- * dw_dma_cyclic_start - start the cyclic DMA transfer
+- * @chan: the DMA channel to start
+- *
+- * Must be called with soft interrupts disabled. Returns zero on success or
+- * -errno on failure.
+- */
+-int dw_dma_cyclic_start(struct dma_chan *chan)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- unsigned long flags;
+-
+- if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+- dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+- return -ENODEV;
+- }
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- /* Assert channel is idle */
+- if (dma_readl(dw, CH_EN) & dwc->mask) {
+- dev_err(chan2dev(&dwc->chan),
+- "BUG: Attempted to start non-idle channel\n");
+- dwc_dump_chan_regs(dwc);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return -EBUSY;
+- }
+-
+- dma_writel(dw, CLEAR.ERROR, dwc->mask);
+- dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+- /* Setup DMAC channel registers */
+- channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+- channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+- channel_writel(dwc, CTL_HI, 0);
+-
+- channel_set_bit(dw, CH_EN, dwc->mask);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dw_dma_cyclic_start);
+-
+-/**
+- * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+- * @chan: the DMA channel to stop
+- *
+- * Must be called with soft interrupts disabled.
+- */
+-void dw_dma_cyclic_stop(struct dma_chan *chan)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- unsigned long flags;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- dwc_chan_disable(dw, dwc);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-}
+-EXPORT_SYMBOL(dw_dma_cyclic_stop);
+-
+-/**
+- * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+- * @chan: the DMA channel to prepare
+- * @buf_addr: physical DMA address where the buffer starts
+- * @buf_len: total number of bytes for the entire buffer
+- * @period_len: number of bytes for each period
+- * @direction: transfer direction, to or from device
+- *
+- * Must be called before trying to start the transfer. Returns a valid struct
+- * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+- */
+-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+- dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+- enum dma_transfer_direction direction)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dma_slave_config *sconfig = &dwc->dma_sconfig;
+- struct dw_cyclic_desc *cdesc;
+- struct dw_cyclic_desc *retval = NULL;
+- struct dw_desc *desc;
+- struct dw_desc *last = NULL;
+- unsigned long was_cyclic;
+- unsigned int reg_width;
+- unsigned int periods;
+- unsigned int i;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+- if (dwc->nollp) {
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- dev_dbg(chan2dev(&dwc->chan),
+- "channel doesn't support LLP transfers\n");
+- return ERR_PTR(-EINVAL);
+- }
+-
+- if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- dev_dbg(chan2dev(&dwc->chan),
+- "queue and/or active list are not empty\n");
+- return ERR_PTR(-EBUSY);
+- }
+-
+- was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- if (was_cyclic) {
+- dev_dbg(chan2dev(&dwc->chan),
+- "channel already prepared for cyclic DMA\n");
+- return ERR_PTR(-EBUSY);
+- }
+-
+- retval = ERR_PTR(-EINVAL);
+-
+- if (unlikely(!is_slave_direction(direction)))
+- goto out_err;
+-
+- dwc->direction = direction;
+-
+- if (direction == DMA_MEM_TO_DEV)
+- reg_width = __ffs(sconfig->dst_addr_width);
+- else
+- reg_width = __ffs(sconfig->src_addr_width);
+-
+- periods = buf_len / period_len;
+-
+- /* Check for too big/unaligned periods and unaligned DMA buffer. */
+- if (period_len > (dwc->block_size << reg_width))
+- goto out_err;
+- if (unlikely(period_len & ((1 << reg_width) - 1)))
+- goto out_err;
+- if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+- goto out_err;
+-
+- retval = ERR_PTR(-ENOMEM);
+-
+- if (periods > NR_DESCS_PER_CHANNEL)
+- goto out_err;
+-
+- cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+- if (!cdesc)
+- goto out_err;
+-
+- cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+- if (!cdesc->desc)
+- goto out_err_alloc;
+-
+- for (i = 0; i < periods; i++) {
+- desc = dwc_desc_get(dwc);
+- if (!desc)
+- goto out_err_desc_get;
+-
+- switch (direction) {
+- case DMA_MEM_TO_DEV:
+- desc->lli.dar = sconfig->dst_addr;
+- desc->lli.sar = buf_addr + (period_len * i);
+- desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+- | DWC_CTLL_DST_WIDTH(reg_width)
+- | DWC_CTLL_SRC_WIDTH(reg_width)
+- | DWC_CTLL_DST_FIX
+- | DWC_CTLL_SRC_INC
+- | DWC_CTLL_INT_EN);
+-
+- desc->lli.ctllo |= sconfig->device_fc ?
+- DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+- DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+-
+- break;
+- case DMA_DEV_TO_MEM:
+- desc->lli.dar = buf_addr + (period_len * i);
+- desc->lli.sar = sconfig->src_addr;
+- desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+- | DWC_CTLL_SRC_WIDTH(reg_width)
+- | DWC_CTLL_DST_WIDTH(reg_width)
+- | DWC_CTLL_DST_INC
+- | DWC_CTLL_SRC_FIX
+- | DWC_CTLL_INT_EN);
+-
+- desc->lli.ctllo |= sconfig->device_fc ?
+- DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+- DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+-
+- break;
+- default:
+- break;
+- }
+-
+- desc->lli.ctlhi = (period_len >> reg_width);
+- cdesc->desc[i] = desc;
+-
+- if (last)
+- last->lli.llp = desc->txd.phys;
+-
+- last = desc;
+- }
+-
+- /* Let's make a cyclic list */
+- last->lli.llp = cdesc->desc[0]->txd.phys;
+-
+- dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
+- "period %zu periods %d\n", (unsigned long long)buf_addr,
+- buf_len, period_len, periods);
+-
+- cdesc->periods = periods;
+- dwc->cdesc = cdesc;
+-
+- return cdesc;
+-
+-out_err_desc_get:
+- while (i--)
+- dwc_desc_put(dwc, cdesc->desc[i]);
+-out_err_alloc:
+- kfree(cdesc);
+-out_err:
+- clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+- return (struct dw_cyclic_desc *)retval;
+-}
+-EXPORT_SYMBOL(dw_dma_cyclic_prep);
+-
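The exported cyclic helpers form a small driver-private API (declared in <linux/dw_dmac.h>) for clients that stream a ring buffer to or from a peripheral; the usual lifecycle is prep, start, and eventually stop plus free. A hedged sketch; client_start_ring, buf_dma and period_bytes are placeholders:

	static int client_start_ring(struct dma_chan *chan, dma_addr_t buf_dma,
				     size_t period_bytes)
	{
		struct dw_cyclic_desc *cdesc;

		cdesc = dw_dma_cyclic_prep(chan, buf_dma, 4 * period_bytes,
					   period_bytes, DMA_MEM_TO_DEV);
		if (IS_ERR(cdesc))
			return PTR_ERR(cdesc);

		dw_dma_cyclic_start(chan);	/* soft interrupts must be disabled here */
		return 0;
	}

	/* ... later, once the stream stops: */
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);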
+-/**
+- * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+- * @chan: the DMA channel to free
+- */
+-void dw_dma_cyclic_free(struct dma_chan *chan)
+-{
+- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- struct dw_cyclic_desc *cdesc = dwc->cdesc;
+- int i;
+- unsigned long flags;
+-
+- dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
+-
+- if (!cdesc)
+- return;
+-
+- spin_lock_irqsave(&dwc->lock, flags);
+-
+- dwc_chan_disable(dw, dwc);
+-
+- dma_writel(dw, CLEAR.ERROR, dwc->mask);
+- dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+- spin_unlock_irqrestore(&dwc->lock, flags);
+-
+- for (i = 0; i < cdesc->periods; i++)
+- dwc_desc_put(dwc, cdesc->desc[i]);
+-
+- kfree(cdesc->desc);
+- kfree(cdesc);
+-
+- clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+-}
+-EXPORT_SYMBOL(dw_dma_cyclic_free);
+-
+-/*----------------------------------------------------------------------*/
+-
+-static void dw_dma_off(struct dw_dma *dw)
+-{
+- int i;
+-
+- dma_writel(dw, CFG, 0);
+-
+- channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+- channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+- channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+- channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+-
+- while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+- cpu_relax();
+-
+- for (i = 0; i < dw->dma.chancnt; i++)
+- dw->chan[i].initialized = false;
+-}
+-
+-#ifdef CONFIG_OF
+-static struct dw_dma_platform_data *
+-dw_dma_parse_dt(struct platform_device *pdev)
+-{
+- struct device_node *np = pdev->dev.of_node;
+- struct dw_dma_platform_data *pdata;
+- u32 tmp, arr[4];
+-
+- if (!np) {
+- dev_err(&pdev->dev, "Missing DT data\n");
+- return NULL;
+- }
+-
+- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+- if (!pdata)
+- return NULL;
+-
+- if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
+- return NULL;
+-
+- if (of_property_read_bool(np, "is_private"))
+- pdata->is_private = true;
+-
+- if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+- pdata->chan_allocation_order = (unsigned char)tmp;
+-
+- if (!of_property_read_u32(np, "chan_priority", &tmp))
+- pdata->chan_priority = tmp;
+-
+- if (!of_property_read_u32(np, "block_size", &tmp))
+- pdata->block_size = tmp;
+-
+- if (!of_property_read_u32(np, "dma-masters", &tmp)) {
+- if (tmp > 4)
+- return NULL;
+-
+- pdata->nr_masters = tmp;
+- }
+-
+- if (!of_property_read_u32_array(np, "data_width", arr,
+- pdata->nr_masters))
+- for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+- pdata->data_width[tmp] = arr[tmp];
+-
+- return pdata;
+-}
+-#else
+-static inline struct dw_dma_platform_data *
+-dw_dma_parse_dt(struct platform_device *pdev)
+-{
+- return NULL;
+-}
+-#endif
+-
+-static int dw_probe(struct platform_device *pdev)
+-{
+- struct dw_dma_platform_data *pdata;
+- struct resource *io;
+- struct dw_dma *dw;
+- size_t size;
+- void __iomem *regs;
+- bool autocfg;
+- unsigned int dw_params;
+- unsigned int nr_channels;
+- unsigned int max_blk_size = 0;
+- int irq;
+- int err;
+- int i;
+-
+- irq = platform_get_irq(pdev, 0);
+- if (irq < 0)
+- return irq;
+-
+- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- regs = devm_ioremap_resource(&pdev->dev, io);
+- if (IS_ERR(regs))
+- return PTR_ERR(regs);
+-
+- /* Apply default dma_mask if needed */
+- if (!pdev->dev.dma_mask) {
+- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+- }
+-
+- dw_params = dma_read_byaddr(regs, DW_PARAMS);
+- autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+-
+- dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+-
+- pdata = dev_get_platdata(&pdev->dev);
+- if (!pdata)
+- pdata = dw_dma_parse_dt(pdev);
+-
+- if (!pdata && autocfg) {
+- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+- if (!pdata)
+- return -ENOMEM;
+-
+- /* Fill platform data with the default values */
+- pdata->is_private = true;
+- pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+- pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+- } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+- return -EINVAL;
+-
+- if (autocfg)
+- nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
+- else
+- nr_channels = pdata->nr_channels;
+-
+- size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
+- dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+- if (!dw)
+- return -ENOMEM;
+-
+- dw->clk = devm_clk_get(&pdev->dev, "hclk");
+- if (IS_ERR(dw->clk))
+- return PTR_ERR(dw->clk);
+- clk_prepare_enable(dw->clk);
+-
+- dw->regs = regs;
+-
+- /* Get hardware configuration parameters */
+- if (autocfg) {
+- max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+-
+- dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
+- for (i = 0; i < dw->nr_masters; i++) {
+- dw->data_width[i] =
+- (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+- }
+- } else {
+- dw->nr_masters = pdata->nr_masters;
+- memcpy(dw->data_width, pdata->data_width, 4);
+- }
+-
+- /* Calculate all channel mask before DMA setup */
+- dw->all_chan_mask = (1 << nr_channels) - 1;
+-
+- /* Force dma off, just in case */
+- dw_dma_off(dw);
+-
+- /* Disable BLOCK interrupts as well */
+- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+-
+- err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
+- "dw_dmac", dw);
+- if (err)
+- return err;
+-
+- platform_set_drvdata(pdev, dw);
+-
+- /* Create a pool of consistent memory blocks for hardware descriptors */
+- dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+- sizeof(struct dw_desc), 4, 0);
+- if (!dw->desc_pool) {
+- dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+- return -ENOMEM;
+- }
+-
+- tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+-
+- INIT_LIST_HEAD(&dw->dma.channels);
+- for (i = 0; i < nr_channels; i++) {
+- struct dw_dma_chan *dwc = &dw->chan[i];
+- int r = nr_channels - i - 1;
+-
+- dwc->chan.device = &dw->dma;
+- dma_cookie_init(&dwc->chan);
+- if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+- list_add_tail(&dwc->chan.device_node,
+- &dw->dma.channels);
+- else
+- list_add(&dwc->chan.device_node, &dw->dma.channels);
+-
+- /* 7 is highest priority & 0 is lowest. */
+- if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+- dwc->priority = r;
+- else
+- dwc->priority = i;
+-
+- dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
+- spin_lock_init(&dwc->lock);
+- dwc->mask = 1 << i;
+-
+- INIT_LIST_HEAD(&dwc->active_list);
+- INIT_LIST_HEAD(&dwc->queue);
+- INIT_LIST_HEAD(&dwc->free_list);
+-
+- channel_clear_bit(dw, CH_EN, dwc->mask);
+-
+- dwc->direction = DMA_TRANS_NONE;
+- dwc->request_line = ~0;
+-
+- /* Hardware configuration */
+- if (autocfg) {
+- unsigned int dwc_params;
+-
+- dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
+- DWC_PARAMS);
+-
+- dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+- dwc_params);
+-
+- /* Decode maximum block size for given channel. The
+- * stored 4 bit value represents blocks from 0x00 for 3
+- * up to 0x0a for 4095. */
+- dwc->block_size =
+- (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+- dwc->nollp =
+- (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
+- } else {
+- dwc->block_size = pdata->block_size;
+-
+- /* Check if channel supports multi block transfer */
+- channel_writel(dwc, LLP, 0xfffffffc);
+- dwc->nollp =
+- (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
+- channel_writel(dwc, LLP, 0);
+- }
+- }
+-
+- /* Clear all interrupts on all channels. */
+- dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
+- dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
+- dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
+- dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
+- dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
+-
+- dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+- dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+- if (pdata->is_private)
+- dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
+- dw->dma.dev = &pdev->dev;
+- dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
+- dw->dma.device_free_chan_resources = dwc_free_chan_resources;
+-
+- dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
+-
+- dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
+- dw->dma.device_control = dwc_control;
+-
+- dw->dma.device_tx_status = dwc_tx_status;
+- dw->dma.device_issue_pending = dwc_issue_pending;
+-
+- dma_writel(dw, CFG, DW_CFG_DMA_EN);
+-
+- dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+- nr_channels);
+-
+- dma_async_device_register(&dw->dma);
+-
+- if (pdev->dev.of_node) {
+- err = of_dma_controller_register(pdev->dev.of_node,
+- dw_dma_of_xlate, dw);
+- if (err)
+- dev_err(&pdev->dev,
+- "could not register of_dma_controller\n");
+- }
+-
+- if (ACPI_HANDLE(&pdev->dev))
+- dw_dma_acpi_controller_register(dw);
+-
+- return 0;
+-}
+-
+-static int dw_remove(struct platform_device *pdev)
+-{
+- struct dw_dma *dw = platform_get_drvdata(pdev);
+- struct dw_dma_chan *dwc, *_dwc;
+-
+- if (pdev->dev.of_node)
+- of_dma_controller_free(pdev->dev.of_node);
+- dw_dma_off(dw);
+- dma_async_device_unregister(&dw->dma);
+-
+- tasklet_kill(&dw->tasklet);
+-
+- list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
+- chan.device_node) {
+- list_del(&dwc->chan.device_node);
+- channel_clear_bit(dw, CH_EN, dwc->mask);
+- }
+-
+- return 0;
+-}
+-
+-static void dw_shutdown(struct platform_device *pdev)
+-{
+- struct dw_dma *dw = platform_get_drvdata(pdev);
+-
+- dw_dma_off(dw);
+- clk_disable_unprepare(dw->clk);
+-}
+-
+-static int dw_suspend_noirq(struct device *dev)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- struct dw_dma *dw = platform_get_drvdata(pdev);
+-
+- dw_dma_off(dw);
+- clk_disable_unprepare(dw->clk);
+-
+- return 0;
+-}
+-
+-static int dw_resume_noirq(struct device *dev)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- struct dw_dma *dw = platform_get_drvdata(pdev);
+-
+- clk_prepare_enable(dw->clk);
+- dma_writel(dw, CFG, DW_CFG_DMA_EN);
+-
+- return 0;
+-}
+-
+-static const struct dev_pm_ops dw_dev_pm_ops = {
+- .suspend_noirq = dw_suspend_noirq,
+- .resume_noirq = dw_resume_noirq,
+- .freeze_noirq = dw_suspend_noirq,
+- .thaw_noirq = dw_resume_noirq,
+- .restore_noirq = dw_resume_noirq,
+- .poweroff_noirq = dw_suspend_noirq,
+-};
+-
+-#ifdef CONFIG_OF
+-static const struct of_device_id dw_dma_of_id_table[] = {
+- { .compatible = "snps,dma-spear1340" },
+- {}
+-};
+-MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
+-#endif
+-
+-#ifdef CONFIG_ACPI
+-static const struct acpi_device_id dw_dma_acpi_id_table[] = {
+- { "INTL9C60", 0 },
+- { }
+-};
+-#endif
+-
+-static struct platform_driver dw_driver = {
+- .probe = dw_probe,
+- .remove = dw_remove,
+- .shutdown = dw_shutdown,
+- .driver = {
+- .name = "dw_dmac",
+- .pm = &dw_dev_pm_ops,
+- .of_match_table = of_match_ptr(dw_dma_of_id_table),
+- .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
+- },
+-};
+-
+-static int __init dw_init(void)
+-{
+- return platform_driver_register(&dw_driver);
+-}
+-subsys_initcall(dw_init);
+-
+-static void __exit dw_exit(void)
+-{
+- platform_driver_unregister(&dw_driver);
+-}
+-module_exit(dw_exit);
+-
+-MODULE_LICENSE("GPL v2");
+-MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
+-MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+--- a/drivers/dma/dw_dmac_regs.h
++++ /dev/null
+@@ -1,311 +0,0 @@
+-/*
+- * Driver for the Synopsys DesignWare AHB DMA Controller
+- *
+- * Copyright (C) 2005-2007 Atmel Corporation
+- * Copyright (C) 2010-2011 ST Microelectronics
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/dmaengine.h>
+-#include <linux/dw_dmac.h>
+-
+-#define DW_DMA_MAX_NR_CHANNELS 8
+-#define DW_DMA_MAX_NR_REQUESTS 16
+-
+-/* flow controller */
+-enum dw_dma_fc {
+- DW_DMA_FC_D_M2M,
+- DW_DMA_FC_D_M2P,
+- DW_DMA_FC_D_P2M,
+- DW_DMA_FC_D_P2P,
+- DW_DMA_FC_P_P2M,
+- DW_DMA_FC_SP_P2P,
+- DW_DMA_FC_P_M2P,
+- DW_DMA_FC_DP_P2P,
+-};
+-
+-/*
+- * Redefine this macro to handle differences between 32- and 64-bit
+- * addressing, big vs. little endian, etc.
+- */
+-#define DW_REG(name) u32 name; u32 __pad_##name
+-
+-/* Hardware register definitions. */
+-struct dw_dma_chan_regs {
+- DW_REG(SAR); /* Source Address Register */
+- DW_REG(DAR); /* Destination Address Register */
+- DW_REG(LLP); /* Linked List Pointer */
+- u32 CTL_LO; /* Control Register Low */
+- u32 CTL_HI; /* Control Register High */
+- DW_REG(SSTAT);
+- DW_REG(DSTAT);
+- DW_REG(SSTATAR);
+- DW_REG(DSTATAR);
+- u32 CFG_LO; /* Configuration Register Low */
+- u32 CFG_HI; /* Configuration Register High */
+- DW_REG(SGR);
+- DW_REG(DSR);
+-};
+-
+-struct dw_dma_irq_regs {
+- DW_REG(XFER);
+- DW_REG(BLOCK);
+- DW_REG(SRC_TRAN);
+- DW_REG(DST_TRAN);
+- DW_REG(ERROR);
+-};
+-
+-struct dw_dma_regs {
+- /* per-channel registers */
+- struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
+-
+- /* irq handling */
+- struct dw_dma_irq_regs RAW; /* r */
+- struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
+- struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
+- struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
+-
+- DW_REG(STATUS_INT); /* r */
+-
+- /* software handshaking */
+- DW_REG(REQ_SRC);
+- DW_REG(REQ_DST);
+- DW_REG(SGL_REQ_SRC);
+- DW_REG(SGL_REQ_DST);
+- DW_REG(LAST_SRC);
+- DW_REG(LAST_DST);
+-
+- /* miscellaneous */
+- DW_REG(CFG);
+- DW_REG(CH_EN);
+- DW_REG(ID);
+- DW_REG(TEST);
+-
+- /* reserved */
+- DW_REG(__reserved0);
+- DW_REG(__reserved1);
+-
+- /* optional encoded params, 0x3c8..0x3f7 */
+- u32 __reserved;
+-
+- /* per-channel configuration registers */
+- u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
+- u32 MULTI_BLK_TYPE;
+- u32 MAX_BLK_SIZE;
+-
+- /* top-level parameters */
+- u32 DW_PARAMS;
+-};
+-
+-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+-#define dma_readl_native ioread32be
+-#define dma_writel_native iowrite32be
+-#else
+-#define dma_readl_native readl
+-#define dma_writel_native writel
+-#endif
+-
+-/* To access the registers during the early stage of probe */
+-#define dma_read_byaddr(addr, name) \
+- dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
+-
+-/* Bitfields in DW_PARAMS */
+-#define DW_PARAMS_NR_CHAN 8 /* number of channels */
+-#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */
+-#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n))
+-#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */
+-#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */
+-#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */
+-#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */
+-#define DW_PARAMS_EN 28 /* encoded parameters */
+-
+-/* Bitfields in DWC_PARAMS */
+-#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */
+-
+-/* Bitfields in CTL_LO */
+-#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
+-#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
+-#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
+-#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
+-#define DWC_CTLL_DST_DEC (1<<7)
+-#define DWC_CTLL_DST_FIX (2<<7)
+-#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */
+-#define DWC_CTLL_SRC_DEC (1<<9)
+-#define DWC_CTLL_SRC_FIX (2<<9)
+-#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
+-#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
+-#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
+-#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
+-#define DWC_CTLL_FC(n) ((n) << 20)
+-#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
+-#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
+-#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
+-#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
+-/* plus 4 transfer types for peripheral-as-flow-controller */
+-#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
+-#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
+-#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
+-#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
+-
+-/* Bitfields in CTL_HI */
+-#define DWC_CTLH_DONE 0x00001000
+-#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
+-
+-/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+-#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
+-#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
+-#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
+-#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* channel FIFO empty (pause complete) */
+-#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
+-#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
+-#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
+-#define DWC_CFGL_RELOAD_SAR (1 << 30)
+-#define DWC_CFGL_RELOAD_DAR (1 << 31)
+-
+-/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+-#define DWC_CFGH_DS_UPD_EN (1 << 5)
+-#define DWC_CFGH_SS_UPD_EN (1 << 6)
+-
+-/* Bitfields in SGR */
+-#define DWC_SGR_SGI(x) ((x) << 0)
+-#define DWC_SGR_SGC(x) ((x) << 20)
+-
+-/* Bitfields in DSR */
+-#define DWC_DSR_DSI(x) ((x) << 0)
+-#define DWC_DSR_DSC(x) ((x) << 20)
+-
+-/* Bitfields in CFG */
+-#define DW_CFG_DMA_EN (1 << 0)
+-
+-enum dw_dmac_flags {
+- DW_DMA_IS_CYCLIC = 0,
+- DW_DMA_IS_SOFT_LLP = 1,
+-};
+-
+-struct dw_dma_chan {
+- struct dma_chan chan;
+- void __iomem *ch_regs;
+- u8 mask;
+- u8 priority;
+- enum dma_transfer_direction direction;
+- bool paused;
+- bool initialized;
+-
+- /* software emulation of the LLP transfers */
+- struct list_head *tx_node_active;
+-
+- spinlock_t lock;
+-
+- /* these other elements are all protected by lock */
+- unsigned long flags;
+- struct list_head active_list;
+- struct list_head queue;
+- struct list_head free_list;
+- u32 residue;
+- struct dw_cyclic_desc *cdesc;
+-
+- unsigned int descs_allocated;
+-
+- /* hardware configuration */
+- unsigned int block_size;
+- bool nollp;
+-
+- /* custom slave configuration */
+- unsigned int request_line;
+- unsigned char src_master;
+- unsigned char dst_master;
+-
+- /* configuration passed via DMA_SLAVE_CONFIG */
+- struct dma_slave_config dma_sconfig;
+-};
+-
+-static inline struct dw_dma_chan_regs __iomem *
+-__dwc_regs(struct dw_dma_chan *dwc)
+-{
+- return dwc->ch_regs;
+-}
+-
+-#define channel_readl(dwc, name) \
+- dma_readl_native(&(__dwc_regs(dwc)->name))
+-#define channel_writel(dwc, name, val) \
+- dma_writel_native((val), &(__dwc_regs(dwc)->name))
+-
+-static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
+-{
+- return container_of(chan, struct dw_dma_chan, chan);
+-}
+-
+-struct dw_dma {
+- struct dma_device dma;
+- void __iomem *regs;
+- struct dma_pool *desc_pool;
+- struct tasklet_struct tasklet;
+- struct clk *clk;
+-
+- u8 all_chan_mask;
+-
+- /* hardware configuration */
+- unsigned char nr_masters;
+- unsigned char data_width[4];
+-
+- struct dw_dma_chan chan[0];
+-};
+-
+-static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
+-{
+- return dw->regs;
+-}
+-
+-#define dma_readl(dw, name) \
+- dma_readl_native(&(__dw_regs(dw)->name))
+-#define dma_writel(dw, name, val) \
+- dma_writel_native((val), &(__dw_regs(dw)->name))
+-
+-#define channel_set_bit(dw, reg, mask) \
+- dma_writel(dw, reg, ((mask) << 8) | (mask))
+-#define channel_clear_bit(dw, reg, mask) \
+- dma_writel(dw, reg, ((mask) << 8) | 0)
+-
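channel_set_bit()/channel_clear_bit() rely on the controller's write-enable convention for the channel-mask registers: the upper byte of the written value selects which bits the write may touch, the lower byte supplies the new value, so other channels are never disturbed. Illustration only:

	channel_set_bit(dw, CH_EN, 1 << 0);	/* writes 0x0101: enable channel 0 only  */
	channel_clear_bit(dw, CH_EN, 1 << 0);	/* writes 0x0100: disable channel 0 only */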
+-static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
+-{
+- return container_of(ddev, struct dw_dma, dma);
+-}
+-
+-/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+-struct dw_lli {
+- /* values that are not changed by hardware */
+- u32 sar;
+- u32 dar;
+- u32 llp; /* chain to next lli */
+- u32 ctllo;
+- /* values that may get written back: */
+- u32 ctlhi;
+- /* sstat and dstat can snapshot peripheral register state.
+- * silicon config may discard either or both...
+- */
+- u32 sstat;
+- u32 dstat;
+-};
+-
+-struct dw_desc {
+- /* FIRST values the hardware uses */
+- struct dw_lli lli;
+-
+- /* THEN values for driver housekeeping */
+- struct list_head desc_node;
+- struct list_head tx_list;
+- struct dma_async_tx_descriptor txd;
+- size_t len;
+- size_t total_len;
+-};
+-
+-#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node)
+-
+-static inline struct dw_desc *
+-txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
+-{
+- return container_of(txd, struct dw_desc, txd);
+-}