fuzz cleanup in the baytrail patches and some others
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 21 Dec 2013 23:47:29 +0000 (15:47 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 21 Dec 2013 23:47:29 +0000 (15:47 -0800)
patches.baytrail/0008-usb-Don-t-enable-USB-2.0-Link-PM-by-default.patch
patches.baytrail/0520-drm-gem-convert-to-new-unified-vma-manager.patch
patches.baytrail/0596-drm-gem-create-drm_gem_dumb_destroy.patch
patches.baytrail/1123-drivers-i2c-busses-don-t-check-resource-with-devm_io.patch
patches.baytrail/1137-dma-move-dw_dmac-driver-to-an-own-directory.patch
patches.baytrail/1170-i2c-use-dev_get_platdata.patch
patches.baytrail/1191-ARM-EXYNOS-Select-PINCTRL_EXYNOS-for-exynos4-5-at-ch.patch
patches.zynq/0010-ARM-zynq-Add-cpuidle-support.patch

diff --git a/patches.baytrail/0008-usb-Don-t-enable-USB-2.0-Link-PM-by-default.patch b/patches.baytrail/0008-usb-Don-t-enable-USB-2.0-Link-PM-by-default.patch
index e4084d6d7d292a7a9eb9ab89aca3f4efaa04cd73..333541c21de791392721506d07be7b5236f0fc86 100644
@@ -75,19 +75,17 @@ Cc: stable@vger.kernel.org
 (cherry picked from commit de68bab4fa96014cfaa6fcbcdb9750e32969fb86)
 Signed-off-by: Darren Hart <dvhart@linux.intel.com>
 ---
- drivers/usb/core/driver.c   |   3 +
- drivers/usb/core/hub.c      |   1 +
- drivers/usb/core/sysfs.c    |   6 +-
- drivers/usb/host/xhci-mem.c |  10 ---
- drivers/usb/host/xhci.c     | 161 +++++---------------------------------------
- include/linux/usb.h         |   4 +-
+ drivers/usb/core/driver.c   |    3 
+ drivers/usb/core/hub.c      |    1 
+ drivers/usb/core/sysfs.c    |    6 +
+ drivers/usb/host/xhci-mem.c |   10 --
+ drivers/usb/host/xhci.c     |  161 ++++----------------------------------------
+ include/linux/usb.h         |    4 -
  6 files changed, 29 insertions(+), 156 deletions(-)
 
-diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
-index 6eab440e1542..bed14edd8ef5 100644
 --- a/drivers/usb/core/driver.c
 +++ b/drivers/usb/core/driver.c
-@@ -1773,6 +1773,9 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+@@ -1773,6 +1773,9 @@ int usb_set_usb2_hardware_lpm(struct usb
        struct usb_hcd *hcd = bus_to_hcd(udev->bus);
        int ret = -EPERM;
  
@@ -97,11 +95,9 @@ index 6eab440e1542..bed14edd8ef5 100644
        if (hcd->driver->set_usb2_hw_lpm) {
                ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
                if (!ret)
-diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 1424a8988849..a5a1fcc09e9d 100644
 --- a/drivers/usb/core/hub.c
 +++ b/drivers/usb/core/hub.c
-@@ -5186,6 +5186,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+@@ -5187,6 +5187,7 @@ static int usb_reset_and_verify_device(s
  
  done:
        /* Now that the alt settings are re-installed, enable LTM and LPM. */
@@ -109,11 +105,9 @@ index 1424a8988849..a5a1fcc09e9d 100644
        usb_unlocked_enable_lpm(udev);
        usb_enable_ltm(udev);
        return 0;
-diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
-index d9284b998bd7..9e6f9a945026 100644
 --- a/drivers/usb/core/sysfs.c
 +++ b/drivers/usb/core/sysfs.c
-@@ -463,7 +463,7 @@ show_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
+@@ -463,7 +463,7 @@ show_usb2_hardware_lpm(struct device *de
        struct usb_device *udev = to_usb_device(dev);
        const char *p;
  
@@ -122,7 +116,7 @@ index d9284b998bd7..9e6f9a945026 100644
                p = "enabled";
        else
                p = "disabled";
-@@ -483,8 +483,10 @@ set_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
+@@ -483,8 +483,10 @@ set_usb2_hardware_lpm(struct device *dev
  
        ret = strtobool(buf, &value);
  
@@ -134,11 +128,9 @@ index d9284b998bd7..9e6f9a945026 100644
  
        usb_unlock_device(udev);
  
-diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
-index 0fc539dc1cb8..27062e6010c2 100644
 --- a/drivers/usb/host/xhci-mem.c
 +++ b/drivers/usb/host/xhci-mem.c
-@@ -1763,9 +1763,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
+@@ -1763,9 +1763,7 @@ void xhci_free_command(struct xhci_hcd *
  void xhci_mem_cleanup(struct xhci_hcd *xhci)
  {
        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
@@ -148,7 +140,7 @@ index 0fc539dc1cb8..27062e6010c2 100644
        int size;
        int i, j, num_ports;
  
-@@ -1824,13 +1822,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+@@ -1824,13 +1822,6 @@ void xhci_mem_cleanup(struct xhci_hcd *x
  
        scratchpad_free(xhci);
  
@@ -162,7 +154,7 @@ index 0fc539dc1cb8..27062e6010c2 100644
        if (!xhci->rh_bw)
                goto no_bw;
  
-@@ -2289,7 +2280,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+@@ -2289,7 +2280,6 @@ int xhci_mem_init(struct xhci_hcd *xhci,
        u32 page_size, temp;
        int i;
  
@@ -170,11 +162,9 @@ index 0fc539dc1cb8..27062e6010c2 100644
        INIT_LIST_HEAD(&xhci->cancel_cmd_list);
  
        page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
-diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
-index a228b796c300..10b15d3aa117 100644
 --- a/drivers/usb/host/xhci.c
 +++ b/drivers/usb/host/xhci.c
-@@ -3940,133 +3940,6 @@ static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
+@@ -3940,133 +3940,6 @@ static int xhci_calculate_usb2_hw_lpm_pa
        return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
  }
  
@@ -308,7 +298,7 @@ index a228b796c300..10b15d3aa117 100644
  int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
                        struct usb_device *udev, int enable)
  {
-@@ -4194,24 +4067,26 @@ static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
+@@ -4194,24 +4067,26 @@ static int xhci_check_usb2_port_capabili
  int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
  {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -351,8 +341,6 @@ index a228b796c300..10b15d3aa117 100644
        }
  
        return 0;
-diff --git a/include/linux/usb.h b/include/linux/usb.h
-index 1f7d63f3ab8b..0ffae80ab1e5 100644
 --- a/include/linux/usb.h
 +++ b/include/linux/usb.h
 @@ -485,7 +485,8 @@ struct usb3_lpm_parameters {
@@ -373,6 +361,3 @@ index 1f7d63f3ab8b..0ffae80ab1e5 100644
        unsigned usb3_lpm_enabled:1;
        int string_langid;
  
--- 
-1.8.5.rc3
-
diff --git a/patches.baytrail/0520-drm-gem-convert-to-new-unified-vma-manager.patch b/patches.baytrail/0520-drm-gem-convert-to-new-unified-vma-manager.patch
index 1ef454237dc32e5736d03ba5f2edaa36573b6195..640b3607f1f2d1831694c06ddf4bcac7ab4a8905 100644
@@ -35,21 +35,19 @@ Signed-off-by: Dave Airlie <airlied@gmail.com>
 (cherry picked from commit 0de23977cfeb5b357ec884ba15417ae118ff9e9b)
 Signed-off-by: Darren Hart <dvhart@linux.intel.com>
 ---
- drivers/gpu/drm/drm_gem.c                  | 89 +++++-------------------------
- drivers/gpu/drm/drm_gem_cma_helper.c       | 16 ++----
- drivers/gpu/drm/exynos/exynos_drm_gem.c    | 14 ++---
- drivers/gpu/drm/gma500/gem.c               | 15 ++---
- drivers/gpu/drm/i915/i915_gem.c            | 10 ++--
- drivers/gpu/drm/omapdrm/omap_gem.c         | 28 +++++-----
- drivers/gpu/drm/omapdrm/omap_gem_helpers.c | 49 +---------------
- drivers/gpu/drm/udl/udl_gem.c              | 13 ++---
- drivers/gpu/host1x/drm/gem.c               |  5 +-
- include/drm/drmP.h                         |  7 +--
- include/uapi/drm/drm.h                     |  2 +-
+ drivers/gpu/drm/drm_gem.c                  |   89 ++++-------------------------
+ drivers/gpu/drm/drm_gem_cma_helper.c       |   16 +----
+ drivers/gpu/drm/exynos/exynos_drm_gem.c    |   14 +---
+ drivers/gpu/drm/gma500/gem.c               |   15 +---
+ drivers/gpu/drm/i915/i915_gem.c            |   10 +--
+ drivers/gpu/drm/omapdrm/omap_gem.c         |   28 ++++-----
+ drivers/gpu/drm/omapdrm/omap_gem_helpers.c |   49 ---------------
+ drivers/gpu/drm/udl/udl_gem.c              |   13 +---
+ drivers/gpu/host1x/drm/gem.c               |    5 -
+ include/drm/drmP.h                         |    7 --
+ include/uapi/drm/drm.h                     |    2 
  11 files changed, 62 insertions(+), 186 deletions(-)
 
-diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
-index df6c89ec27ec..d1ba36512fe4 100644
 --- a/drivers/gpu/drm/drm_gem.c
 +++ b/drivers/gpu/drm/drm_gem.c
 @@ -37,6 +37,7 @@
@@ -88,7 +86,7 @@ index df6c89ec27ec..d1ba36512fe4 100644
        kfree(mm);
        dev->mm_private = NULL;
  }
-@@ -302,12 +297,8 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+@@ -302,12 +297,8 @@ drm_gem_free_mmap_offset(struct drm_gem_
  {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
@@ -102,7 +100,7 @@ index df6c89ec27ec..d1ba36512fe4 100644
  }
  EXPORT_SYMBOL(drm_gem_free_mmap_offset);
  
-@@ -327,54 +318,9 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+@@ -327,54 +318,9 @@ drm_gem_create_mmap_offset(struct drm_ge
  {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
@@ -130,7 +128,7 @@ index df6c89ec27ec..d1ba36512fe4 100644
 -              ret = -ENOSPC;
 -              goto out_free_list;
 -      }
+-
 -      list->file_offset_node = drm_mm_get_block(list->file_offset_node,
 -                      obj->size / PAGE_SIZE, 0);
 -      if (!list->file_offset_node) {
@@ -146,7 +144,7 @@ index df6c89ec27ec..d1ba36512fe4 100644
 -      }
 -
 -      return 0;
--
 -out_free_mm:
 -      drm_mm_put_block(list->file_offset_node);
 -out_free_list:
@@ -159,7 +157,7 @@ index df6c89ec27ec..d1ba36512fe4 100644
  }
  EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  
-@@ -699,8 +645,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+@@ -699,8 +645,8 @@ int drm_gem_mmap(struct file *filp, stru
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_mm *mm = dev->mm_private;
@@ -170,7 +168,7 @@ index df6c89ec27ec..d1ba36512fe4 100644
        int ret = 0;
  
        if (drm_device_is_unplugged(dev))
-@@ -708,21 +654,16 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+@@ -708,21 +654,16 @@ int drm_gem_mmap(struct file *filp, stru
  
        mutex_lock(&dev->struct_mutex);
  
@@ -197,11 +195,9 @@ index df6c89ec27ec..d1ba36512fe4 100644
        mutex_unlock(&dev->struct_mutex);
  
        return ret;
-diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
-index 0a7e011509bd..11b616ef9dc2 100644
 --- a/drivers/gpu/drm/drm_gem_cma_helper.c
 +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
-@@ -26,11 +26,7 @@
+@@ -27,11 +27,7 @@
  #include <drm/drmP.h>
  #include <drm/drm.h>
  #include <drm/drm_gem_cma_helper.h>
@@ -212,9 +208,9 @@ index 0a7e011509bd..11b616ef9dc2 100644
 -}
 +#include <drm/drm_vma_manager.h>
  
- static void drm_gem_cma_buf_destroy(struct drm_device *drm,
-               struct drm_gem_cma_object *cma_obj)
-@@ -140,8 +136,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+ /*
+  * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_
  {
        struct drm_gem_cma_object *cma_obj;
  
@@ -222,9 +218,9 @@ index 0a7e011509bd..11b616ef9dc2 100644
 -              drm_gem_free_mmap_offset(gem_obj);
 +      drm_gem_free_mmap_offset(gem_obj);
  
-       drm_gem_object_release(gem_obj);
+       cma_obj = to_drm_gem_cma_obj(gem_obj);
  
-@@ -199,7 +194,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+@@ -240,7 +235,7 @@ int drm_gem_cma_dumb_map_offset(struct d
                return -EINVAL;
        }
  
@@ -233,7 +229,7 @@ index 0a7e011509bd..11b616ef9dc2 100644
  
        drm_gem_object_unreference(gem_obj);
  
-@@ -255,12 +250,11 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
+@@ -304,12 +299,11 @@ void drm_gem_cma_describe(struct drm_gem
  {
        struct drm_gem_object *obj = &cma_obj->base;
        struct drm_device *dev = obj->dev;
@@ -248,8 +244,6 @@ index 0a7e011509bd..11b616ef9dc2 100644
  
        seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
                        obj->name, obj->refcount.refcount.counter,
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
-index cf4543ffa079..408b71f4c95e 100644
 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
 +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
 @@ -10,6 +10,7 @@
@@ -270,7 +264,7 @@ index cf4543ffa079..408b71f4c95e 100644
  
        /* release file pointer to gem object. */
        drm_gem_object_release(obj);
-@@ -721,13 +721,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+@@ -721,13 +721,11 @@ int exynos_drm_gem_dumb_map_offset(struc
                goto unlock;
        }
  
@@ -288,8 +282,6 @@ index cf4543ffa079..408b71f4c95e 100644
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
  
  out:
-diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
-index fe1d3320ce6a..2f77bea30b11 100644
 --- a/drivers/gpu/drm/gma500/gem.c
 +++ b/drivers/gpu/drm/gma500/gem.c
 @@ -26,6 +26,7 @@
@@ -300,7 +292,7 @@ index fe1d3320ce6a..2f77bea30b11 100644
  #include "psb_drv.h"
  
  int psb_gem_init_object(struct drm_gem_object *obj)
-@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
+@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_
        struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
  
        /* Remove the list map if one is present */
@@ -310,7 +302,7 @@ index fe1d3320ce6a..2f77bea30b11 100644
        drm_gem_object_release(obj);
  
        /* This must occur last as it frees up the memory of the GEM object */
-@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file
        /* What validation is needed here ? */
  
        /* Make it mmapable */
@@ -328,8 +320,6 @@ index fe1d3320ce6a..2f77bea30b11 100644
  out:
        drm_gem_object_unreference(obj);
  unlock:
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 050eb9b92595..607dc675840e 100644
 --- a/drivers/gpu/drm/i915/i915_gem.c
 +++ b/drivers/gpu/drm/i915/i915_gem.c
 @@ -26,6 +26,7 @@
@@ -340,7 +330,7 @@ index 050eb9b92595..607dc675840e 100644
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "i915_trace.h"
-@@ -1424,7 +1425,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
+@@ -1424,7 +1425,7 @@ i915_gem_release_mmap(struct drm_i915_ge
  
        if (obj->base.dev->dev_mapping)
                unmap_mapping_range(obj->base.dev->dev_mapping,
@@ -349,7 +339,7 @@ index 050eb9b92595..607dc675840e 100644
                                    obj->base.size, 1);
  
        obj->fault_mappable = false;
-@@ -1482,7 +1483,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+@@ -1482,7 +1483,7 @@ static int i915_gem_object_create_mmap_o
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;
  
@@ -377,8 +367,6 @@ index 050eb9b92595..607dc675840e 100644
  
  out:
        drm_gem_object_unreference(&obj->base);
-diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
-index cbcd71e6ed83..f90531fc00c9 100644
 --- a/drivers/gpu/drm/omapdrm/omap_gem.c
 +++ b/drivers/gpu/drm/omapdrm/omap_gem.c
 @@ -20,6 +20,7 @@
@@ -389,7 +377,7 @@ index cbcd71e6ed83..f90531fc00c9 100644
  
  #include "omap_drv.h"
  #include "omap_dmm_tiler.h"
-@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
+@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_o
  static uint64_t mmap_offset(struct drm_gem_object *obj)
  {
        struct drm_device *dev = obj->dev;
@@ -420,7 +408,7 @@ index cbcd71e6ed83..f90531fc00c9 100644
  }
  
  uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
-@@ -997,12 +997,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+@@ -997,12 +997,11 @@ void omap_gem_describe(struct drm_gem_ob
  {
        struct drm_device *dev = obj->dev;
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -435,7 +423,7 @@ index cbcd71e6ed83..f90531fc00c9 100644
  
        seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
                        omap_obj->flags, obj->name, obj->refcount.refcount.counter,
-@@ -1309,8 +1308,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
+@@ -1309,8 +1308,7 @@ void omap_gem_free_object(struct drm_gem
  
        list_del(&omap_obj->mm_list);
  
@@ -445,18 +433,16 @@ index cbcd71e6ed83..f90531fc00c9 100644
  
        /* this means the object is still pinned.. which really should
         * not happen.  I think..
-diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
-index f9eb679eb79b..dbb157542f8f 100644
 --- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
 +++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
-@@ -118,52 +118,7 @@ _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+@@ -118,52 +118,7 @@ _drm_gem_create_mmap_offset_size(struct
  {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
 -      struct drm_map_list *list;
 -      struct drm_local_map *map;
 -      int ret = 0;
--
 -      /* Set the object up for mmap'ing */
 -      list = &obj->map_list;
 -      list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
@@ -499,16 +485,14 @@ index f9eb679eb79b..dbb157542f8f 100644
 -out_free_list:
 -      kfree(list->map);
 -      list->map = NULL;
+-
 -      return ret;
 +      return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
 +                                size / PAGE_SIZE);
  }
-diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
-index ef034fa3e6f5..2a4cb2f83b36 100644
 --- a/drivers/gpu/drm/udl/udl_gem.c
 +++ b/drivers/gpu/drm/udl/udl_gem.c
-@@ -223,8 +223,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
+@@ -223,8 +223,7 @@ void udl_gem_free_object(struct drm_gem_
        if (obj->pages)
                udl_gem_put_pages(obj);
  
@@ -518,7 +502,7 @@ index ef034fa3e6f5..2a4cb2f83b36 100644
  }
  
  /* the dumb interface doesn't work with the GEM straight MMAP
-@@ -247,13 +246,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
+@@ -247,13 +246,11 @@ int udl_gem_mmap(struct drm_file *file,
        ret = udl_gem_get_pages(gobj, GFP_KERNEL);
        if (ret)
                goto out;
@@ -536,11 +520,9 @@ index ef034fa3e6f5..2a4cb2f83b36 100644
  
  out:
        drm_gem_object_unreference(&gobj->base);
-diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
-index c5e9a9b494c2..bc323b3dbe4d 100644
 --- a/drivers/gpu/host1x/drm/gem.c
 +++ b/drivers/gpu/host1x/drm/gem.c
-@@ -108,7 +108,7 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+@@ -108,7 +108,7 @@ static void tegra_bo_destroy(struct drm_
  
  unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
  {
@@ -549,7 +531,7 @@ index c5e9a9b494c2..bc323b3dbe4d 100644
  }
  
  struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
-@@ -182,8 +182,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
+@@ -182,8 +182,7 @@ void tegra_bo_free_object(struct drm_gem
  {
        struct tegra_bo *bo = to_tegra_bo(gem);
  
@@ -559,8 +541,6 @@ index c5e9a9b494c2..bc323b3dbe4d 100644
  
        drm_gem_object_release(gem);
        tegra_bo_destroy(gem->dev, bo);
-diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index 5ff88ad7b23c..bf677c0b4cae 100644
 --- a/include/drm/drmP.h
 +++ b/include/drm/drmP.h
 @@ -74,6 +74,7 @@
@@ -598,8 +578,6 @@ index 5ff88ad7b23c..bf677c0b4cae 100644
  
        /**
         * Size of the object, in bytes.  Immutable over the object's
-diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
-index 5a57be68bab7..1d1c6f03021e 100644
 --- a/include/uapi/drm/drm.h
 +++ b/include/uapi/drm/drm.h
 @@ -181,7 +181,7 @@ enum drm_map_type {
@@ -611,6 +589,3 @@ index 5a57be68bab7..1d1c6f03021e 100644
  };
  
  /**
--- 
-1.8.5.rc3
-
diff --git a/patches.baytrail/0596-drm-gem-create-drm_gem_dumb_destroy.patch b/patches.baytrail/0596-drm-gem-create-drm_gem_dumb_destroy.patch
index c846b3f54c61156c0c6e8dce3a268cd2fb5ece09..db964ac123212c3542d1900adc0a87505e564f8c 100644
@@ -31,53 +31,51 @@ Conflicts:
        (we don't have this driver in our tree)
 Signed-off-by: Darren Hart <dvhart@linux.intel.com>
 ---
- drivers/gpu/drm/ast/ast_drv.c             |  2 +-
- drivers/gpu/drm/ast/ast_drv.h             |  3 ---
- drivers/gpu/drm/ast/ast_main.c            |  7 -------
- drivers/gpu/drm/cirrus/cirrus_drv.c       |  2 +-
- drivers/gpu/drm/cirrus/cirrus_drv.h       |  3 ---
- drivers/gpu/drm/cirrus/cirrus_main.c      |  7 -------
- drivers/gpu/drm/drm_gem.c                 | 14 ++++++++++++++
- drivers/gpu/drm/drm_gem_cma_helper.c      | 10 ----------
- drivers/gpu/drm/exynos/exynos_drm_drv.c   |  2 +-
- drivers/gpu/drm/exynos/exynos_drm_gem.c   | 22 ----------------------
- drivers/gpu/drm/exynos/exynos_drm_gem.h   |  9 ---------
- drivers/gpu/drm/gma500/gem.c              | 17 -----------------
- drivers/gpu/drm/gma500/psb_drv.c          |  2 +-
- drivers/gpu/drm/gma500/psb_drv.h          |  2 --
- drivers/gpu/drm/i915/i915_drv.c           |  2 +-
- drivers/gpu/drm/i915/i915_drv.h           |  2 --
- drivers/gpu/drm/i915/i915_gem.c           |  7 -------
- drivers/gpu/drm/mgag200/mgag200_drv.c     |  2 +-
- drivers/gpu/drm/mgag200/mgag200_drv.h     |  3 ---
- drivers/gpu/drm/mgag200/mgag200_main.c    |  7 -------
- drivers/gpu/drm/nouveau/nouveau_display.c |  7 -------
- drivers/gpu/drm/nouveau/nouveau_display.h |  2 --
- drivers/gpu/drm/nouveau/nouveau_drm.c     |  2 +-
- drivers/gpu/drm/omapdrm/omap_drv.c        |  2 +-
- drivers/gpu/drm/omapdrm/omap_drv.h        |  2 --
- drivers/gpu/drm/omapdrm/omap_gem.c        | 15 ---------------
- drivers/gpu/drm/qxl/qxl_drv.c             |  2 +-
- drivers/gpu/drm/qxl/qxl_drv.h             |  3 ---
- drivers/gpu/drm/qxl/qxl_dumb.c            |  7 -------
- drivers/gpu/drm/radeon/radeon.h           |  3 ---
- drivers/gpu/drm/radeon/radeon_drv.c       |  5 +----
- drivers/gpu/drm/radeon/radeon_gem.c       |  7 -------
- drivers/gpu/drm/shmobile/shmob_drm_drv.c  |  2 +-
- drivers/gpu/drm/tilcdc/tilcdc_drv.c       |  2 +-
- drivers/gpu/drm/udl/udl_drv.c             |  2 +-
- drivers/gpu/drm/udl/udl_drv.h             |  2 --
- drivers/gpu/drm/udl/udl_gem.c             |  6 ------
- drivers/gpu/host1x/drm/drm.c              |  2 +-
- drivers/gpu/host1x/drm/gem.c              |  6 ------
- drivers/gpu/host1x/drm/gem.h              |  2 --
- drivers/staging/imx-drm/imx-drm-core.c    |  2 +-
- include/drm/drmP.h                        |  3 +++
- include/drm/drm_gem_cma_helper.h          |  8 --------
+ drivers/gpu/drm/ast/ast_drv.c             |    2 +-
+ drivers/gpu/drm/ast/ast_drv.h             |    3 ---
+ drivers/gpu/drm/ast/ast_main.c            |    7 -------
+ drivers/gpu/drm/cirrus/cirrus_drv.c       |    2 +-
+ drivers/gpu/drm/cirrus/cirrus_drv.h       |    3 ---
+ drivers/gpu/drm/cirrus/cirrus_main.c      |    7 -------
+ drivers/gpu/drm/drm_gem.c                 |   14 ++++++++++++++
+ drivers/gpu/drm/drm_gem_cma_helper.c      |   10 ----------
+ drivers/gpu/drm/exynos/exynos_drm_drv.c   |    2 +-
+ drivers/gpu/drm/exynos/exynos_drm_gem.c   |   22 ----------------------
+ drivers/gpu/drm/exynos/exynos_drm_gem.h   |    9 ---------
+ drivers/gpu/drm/gma500/gem.c              |   17 -----------------
+ drivers/gpu/drm/gma500/psb_drv.c          |    2 +-
+ drivers/gpu/drm/gma500/psb_drv.h          |    2 --
+ drivers/gpu/drm/i915/i915_drv.c           |    2 +-
+ drivers/gpu/drm/i915/i915_drv.h           |    2 --
+ drivers/gpu/drm/i915/i915_gem.c           |    7 -------
+ drivers/gpu/drm/mgag200/mgag200_drv.c     |    2 +-
+ drivers/gpu/drm/mgag200/mgag200_drv.h     |    3 ---
+ drivers/gpu/drm/mgag200/mgag200_main.c    |    7 -------
+ drivers/gpu/drm/nouveau/nouveau_display.c |    7 -------
+ drivers/gpu/drm/nouveau/nouveau_display.h |    2 --
+ drivers/gpu/drm/nouveau/nouveau_drm.c     |    2 +-
+ drivers/gpu/drm/omapdrm/omap_drv.c        |    2 +-
+ drivers/gpu/drm/omapdrm/omap_drv.h        |    2 --
+ drivers/gpu/drm/omapdrm/omap_gem.c        |   15 ---------------
+ drivers/gpu/drm/qxl/qxl_drv.c             |    2 +-
+ drivers/gpu/drm/qxl/qxl_drv.h             |    3 ---
+ drivers/gpu/drm/qxl/qxl_dumb.c            |    7 -------
+ drivers/gpu/drm/radeon/radeon.h           |    3 ---
+ drivers/gpu/drm/radeon/radeon_drv.c       |    5 +----
+ drivers/gpu/drm/radeon/radeon_gem.c       |    7 -------
+ drivers/gpu/drm/shmobile/shmob_drm_drv.c  |    2 +-
+ drivers/gpu/drm/tilcdc/tilcdc_drv.c       |    2 +-
+ drivers/gpu/drm/udl/udl_drv.c             |    2 +-
+ drivers/gpu/drm/udl/udl_drv.h             |    2 --
+ drivers/gpu/drm/udl/udl_gem.c             |    6 ------
+ drivers/gpu/host1x/drm/drm.c              |    2 +-
+ drivers/gpu/host1x/drm/gem.c              |    6 ------
+ drivers/gpu/host1x/drm/gem.h              |    2 --
+ drivers/staging/imx-drm/imx-drm-core.c    |    2 +-
+ include/drm/drmP.h                        |    3 +++
+ include/drm/drm_gem_cma_helper.h          |    8 --------
  43 files changed, 32 insertions(+), 187 deletions(-)
 
-diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
-index df0d0a08097a..a144fb044852 100644
 --- a/drivers/gpu/drm/ast/ast_drv.c
 +++ b/drivers/gpu/drm/ast/ast_drv.c
 @@ -216,7 +216,7 @@ static struct drm_driver driver = {
@@ -89,8 +87,6 @@ index df0d0a08097a..a144fb044852 100644
  
  };
  
-diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
-index b6b7d70f2832..68e1d324005a 100644
 --- a/drivers/gpu/drm/ast/ast_drv.h
 +++ b/drivers/gpu/drm/ast/ast_drv.h
 @@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
@@ -103,11 +99,9 @@ index b6b7d70f2832..68e1d324005a 100644
  
  extern int ast_gem_init_object(struct drm_gem_object *obj);
  extern void ast_gem_free_object(struct drm_gem_object *obj);
-diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
-index c195dc2abc09..7f6152d374ca 100644
 --- a/drivers/gpu/drm/ast/ast_main.c
 +++ b/drivers/gpu/drm/ast/ast_main.c
-@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
+@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *fil
        return 0;
  }
  
@@ -121,8 +115,6 @@ index c195dc2abc09..7f6152d374ca 100644
  int ast_gem_init_object(struct drm_gem_object *obj)
  {
        BUG();
-diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
-index 8ecb601152ef..d35d99c15f84 100644
 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c
 +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
 @@ -102,7 +102,7 @@ static struct drm_driver driver = {
@@ -134,11 +126,9 @@ index 8ecb601152ef..d35d99c15f84 100644
  };
  
  static struct pci_driver cirrus_pci_driver = {
-diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
-index 7ca059596887..33a0f991b0fc 100644
 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h
 +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
-@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
+@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device
  int cirrus_dumb_create(struct drm_file *file,
                    struct drm_device *dev,
                       struct drm_mode_create_dumb *args);
@@ -148,11 +138,9 @@ index 7ca059596887..33a0f991b0fc 100644
  
  int cirrus_framebuffer_init(struct drm_device *dev,
                           struct cirrus_framebuffer *gfb,
-diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
-index 3a7a0efe3675..f130a533a512 100644
 --- a/drivers/gpu/drm/cirrus/cirrus_main.c
 +++ b/drivers/gpu/drm/cirrus/cirrus_main.c
-@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
+@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *
        return 0;
  }
  
@@ -166,16 +154,14 @@ index 3a7a0efe3675..f130a533a512 100644
  int cirrus_gem_init_object(struct drm_gem_object *obj)
  {
        BUG();
-diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
-index 2688795172f9..ee9ddc856710 100644
 --- a/drivers/gpu/drm/drm_gem.c
 +++ b/drivers/gpu/drm/drm_gem.c
-@@ -244,6 +244,20 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+@@ -244,6 +244,20 @@ drm_gem_handle_delete(struct drm_file *f
  EXPORT_SYMBOL(drm_gem_handle_delete);
  
  /**
 + * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
-+ * 
++ *
 + * This implements the ->dumb_destroy kms driver callback for drivers which use
 + * gem to manage their backing storage.
 + */
@@ -191,11 +177,9 @@ index 2688795172f9..ee9ddc856710 100644
   * Create a handle for this object. This adds a handle reference
   * to the object, which includes a regular reference count. Callers
   * will likely want to dereference the object afterwards.
-diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
-index 11b616ef9dc2..3ec218376734 100644
 --- a/drivers/gpu/drm/drm_gem_cma_helper.c
 +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
-@@ -235,16 +235,6 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+@@ -284,16 +284,6 @@ int drm_gem_cma_mmap(struct file *filp,
  }
  EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
  
@@ -212,11 +196,9 @@ index 11b616ef9dc2..3ec218376734 100644
  #ifdef CONFIG_DEBUG_FS
  void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
  {
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
-index ba6d995e4375..1ff89aca1fed 100644
 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
 +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
-@@ -276,7 +276,7 @@ static struct drm_driver exynos_drm_driver = {
+@@ -276,7 +276,7 @@ static struct drm_driver exynos_drm_driv
        .gem_vm_ops             = &exynos_drm_gem_vm_ops,
        .dumb_create            = exynos_drm_gem_dumb_create,
        .dumb_map_offset        = exynos_drm_gem_dumb_map_offset,
@@ -225,8 +207,6 @@ index ba6d995e4375..1ff89aca1fed 100644
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = exynos_dmabuf_prime_export,
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
-index 408b71f4c95e..e83930fdf6c7 100644
 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
 +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
 @@ -735,28 +735,6 @@ unlock:
@@ -258,11 +238,9 @@ index 408b71f4c95e..e83930fdf6c7 100644
  int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
        struct drm_gem_object *obj = vma->vm_private_data;
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
-index 468766bee450..09555afdfe9c 100644
 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
 +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
-@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struc
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset);
  
@@ -278,11 +256,9 @@ index 468766bee450..09555afdfe9c 100644
  /* page fault handler and mmap fault address(virtual) to physical memory. */
  int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
  
-diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
-index 2f77bea30b11..10ae8c52d06f 100644
 --- a/drivers/gpu/drm/gma500/gem.c
 +++ b/drivers/gpu/drm/gma500/gem.c
-@@ -162,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+@@ -162,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file
  }
  
  /**
@@ -306,8 +282,6 @@ index 2f77bea30b11..10ae8c52d06f 100644
   *    psb_gem_fault           -       pagefault handler for GEM objects
   *    @vma: the VMA of the GEM object
   *    @vmf: fault detail
-diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
-index bddea5807442..ed06d5ce3757 100644
 --- a/drivers/gpu/drm/gma500/psb_drv.c
 +++ b/drivers/gpu/drm/gma500/psb_drv.c
 @@ -652,7 +652,7 @@ static struct drm_driver driver = {
@@ -319,11 +293,9 @@ index bddea5807442..ed06d5ce3757 100644
        .fops = &psb_gem_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
-diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
-index 6053b8abcd12..984cacfcbaf2 100644
 --- a/drivers/gpu/drm/gma500/psb_drv.h
 +++ b/drivers/gpu/drm/gma500/psb_drv.h
-@@ -838,8 +838,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+@@ -838,8 +838,6 @@ extern int psb_gem_get_aperture(struct d
                        struct drm_file *file);
  extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                        struct drm_mode_create_dumb *args);
@@ -332,8 +304,6 @@ index 6053b8abcd12..984cacfcbaf2 100644
  extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
                        uint32_t handle, uint64_t *offset);
  extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index 01d63a0435fb..13457e3e9cad 100644
 --- a/drivers/gpu/drm/i915/i915_drv.c
 +++ b/drivers/gpu/drm/i915/i915_drv.c
 @@ -1038,7 +1038,7 @@ static struct drm_driver driver = {
@@ -345,11 +315,9 @@ index 01d63a0435fb..13457e3e9cad 100644
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 62ec760782f5..06c31752fcb2 100644
 --- a/drivers/gpu/drm/i915/i915_drv.h
 +++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -1775,8 +1775,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
+@@ -1775,8 +1775,6 @@ int i915_gem_dumb_create(struct drm_file
                         struct drm_mode_create_dumb *args);
  int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
@@ -358,11 +326,9 @@ index 62ec760782f5..06c31752fcb2 100644
  /**
   * Returns true if seq1 is later than seq2.
   */
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index d31e15dd173c..967fe650fa8b 100644
 --- a/drivers/gpu/drm/i915/i915_gem.c
 +++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -245,13 +245,6 @@ i915_gem_dumb_create(struct drm_file *file,
+@@ -245,13 +245,6 @@ i915_gem_dumb_create(struct drm_file *fi
                               args->size, &args->handle);
  }
  
@@ -376,8 +342,6 @@ index d31e15dd173c..967fe650fa8b 100644
  /**
   * Creates a new mm object and returns a handle to it.
   */
-diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
-index 122b571ccc7c..bd9196478735 100644
 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c
 +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
 @@ -104,7 +104,7 @@ static struct drm_driver driver = {
@@ -389,11 +353,9 @@ index 122b571ccc7c..bd9196478735 100644
  };
  
  static struct pci_driver mgag200_pci_driver = {
-diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
-index 988911afcc8b..e61ce34910d6 100644
 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h
 +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
-@@ -248,9 +248,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
+@@ -248,9 +248,6 @@ int mgag200_gem_init_object(struct drm_g
  int mgag200_dumb_create(struct drm_file *file,
                        struct drm_device *dev,
                        struct drm_mode_create_dumb *args);
@@ -403,11 +365,9 @@ index 988911afcc8b..e61ce34910d6 100644
  void mgag200_gem_free_object(struct drm_gem_object *obj);
  int
  mgag200_dumb_mmap_offset(struct drm_file *file,
-diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
-index 2d56e28d2b21..4529d4dd12c2 100644
 --- a/drivers/gpu/drm/mgag200/mgag200_main.c
 +++ b/drivers/gpu/drm/mgag200/mgag200_main.c
-@@ -291,13 +291,6 @@ int mgag200_dumb_create(struct drm_file *file,
+@@ -291,13 +291,6 @@ int mgag200_dumb_create(struct drm_file
        return 0;
  }
  
@@ -421,11 +381,9 @@ index 2d56e28d2b21..4529d4dd12c2 100644
  int mgag200_gem_init_object(struct drm_gem_object *obj)
  {
        BUG();
-diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
-index 52498de87a3b..05ae27277543 100644
 --- a/drivers/gpu/drm/nouveau/nouveau_display.c
 +++ b/drivers/gpu/drm/nouveau/nouveau_display.c
-@@ -689,13 +689,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+@@ -689,13 +689,6 @@ nouveau_display_dumb_create(struct drm_f
  }
  
  int
@@ -439,11 +397,9 @@ index 52498de87a3b..05ae27277543 100644
  nouveau_display_dumb_map_offset(struct drm_file *file_priv,
                                struct drm_device *dev,
                                uint32_t handle, uint64_t *poffset)
-diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
-index 1ea3e4734b62..185e74132a6d 100644
 --- a/drivers/gpu/drm/nouveau/nouveau_display.h
 +++ b/drivers/gpu/drm/nouveau/nouveau_display.h
-@@ -68,8 +68,6 @@ int  nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+@@ -68,8 +68,6 @@ int  nouveau_display_dumb_create(struct
                                 struct drm_mode_create_dumb *args);
  int  nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
                                     u32 handle, u64 *offset);
@@ -452,8 +408,6 @@ index 1ea3e4734b62..185e74132a6d 100644
  
  void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
  
-diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
-index 383f4e6ea9d1..b77bcb9237e0 100644
 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
 @@ -714,7 +714,7 @@ driver = {
@@ -465,11 +419,9 @@ index 383f4e6ea9d1..b77bcb9237e0 100644
  
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
-diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
-index 826586ffbe83..75886a3bf639 100644
 --- a/drivers/gpu/drm/omapdrm/omap_drv.c
 +++ b/drivers/gpu/drm/omapdrm/omap_drv.c
-@@ -618,7 +618,7 @@ static struct drm_driver omap_drm_driver = {
+@@ -618,7 +618,7 @@ static struct drm_driver omap_drm_driver
                .gem_vm_ops = &omap_gem_vm_ops,
                .dumb_create = omap_gem_dumb_create,
                .dumb_map_offset = omap_gem_dumb_map_offset,
@@ -478,11 +430,9 @@ index 826586ffbe83..75886a3bf639 100644
                .ioctls = ioctls,
                .num_ioctls = DRM_OMAP_NUM_IOCTLS,
                .fops = &omapdriver_fops,
-diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
-index 215a20dd340c..fd13601ff6fb 100644
 --- a/drivers/gpu/drm/omapdrm/omap_drv.h
 +++ b/drivers/gpu/drm/omapdrm/omap_drv.h
-@@ -224,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
+@@ -224,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_
  void *omap_gem_vaddr(struct drm_gem_object *obj);
  int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset);
@@ -491,11 +441,9 @@ index 215a20dd340c..fd13601ff6fb 100644
  int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
  int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
-index f90531fc00c9..b1f19702550f 100644
 --- a/drivers/gpu/drm/omapdrm/omap_gem.c
 +++ b/drivers/gpu/drm/omapdrm/omap_gem.c
-@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file
  }
  
  /**
@@ -517,8 +465,6 @@ index f90531fc00c9..b1f19702550f 100644
   * omap_gem_dumb_map  -       buffer mapping for dumb interface
   * @file: our drm client file
   * @dev: drm device
-diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
-index aa291d8a98a2..60cb159c4f7d 100644
 --- a/drivers/gpu/drm/qxl/qxl_drv.c
 +++ b/drivers/gpu/drm/qxl/qxl_drv.c
 @@ -99,7 +99,7 @@ static struct drm_driver qxl_driver = {
@@ -530,11 +476,9 @@ index aa291d8a98a2..60cb159c4f7d 100644
  #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = qxl_debugfs_init,
        .debugfs_cleanup = qxl_debugfs_takedown,
-diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
-index 43d06ab28a21..089fd42802dd 100644
 --- a/drivers/gpu/drm/qxl/qxl_drv.h
 +++ b/drivers/gpu/drm/qxl/qxl_drv.h
-@@ -409,9 +409,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+@@ -409,9 +409,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void
  int qxl_mode_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
@@ -544,11 +488,9 @@ index 43d06ab28a21..089fd42802dd 100644
  int qxl_mode_dumb_mmap(struct drm_file *filp,
                       struct drm_device *dev,
                       uint32_t handle, uint64_t *offset_p);
-diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
-index 847c4ee798f7..d34bb4130ff0 100644
 --- a/drivers/gpu/drm/qxl/qxl_dumb.c
 +++ b/drivers/gpu/drm/qxl/qxl_dumb.c
-@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file
        return 0;
  }
  
@@ -562,11 +504,9 @@ index 847c4ee798f7..d34bb4130ff0 100644
  int qxl_mode_dumb_mmap(struct drm_file *file_priv,
                       struct drm_device *dev,
                       uint32_t handle, uint64_t *offset_p)
-diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
-index d4ff48ce1d8b..0fbc44e468da 100644
 --- a/drivers/gpu/drm/radeon/radeon.h
 +++ b/drivers/gpu/drm/radeon/radeon.h
-@@ -444,9 +444,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
+@@ -444,9 +444,6 @@ int radeon_mode_dumb_create(struct drm_f
  int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p);
@@ -576,11 +516,9 @@ index d4ff48ce1d8b..0fbc44e468da 100644
  
  /*
   * Semaphores.
-diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
-index 094e7e5ea39e..bef72931ea08 100644
 --- a/drivers/gpu/drm/radeon/radeon_drv.c
 +++ b/drivers/gpu/drm/radeon/radeon_drv.c
-@@ -119,9 +119,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
+@@ -119,9 +119,6 @@ int radeon_mode_dumb_mmap(struct drm_fil
  int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args);
@@ -599,11 +537,9 @@ index 094e7e5ea39e..bef72931ea08 100644
        .fops = &radeon_driver_kms_fops,
  
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
-index aa796031ab65..dce99c8a5835 100644
 --- a/drivers/gpu/drm/radeon/radeon_gem.c
 +++ b/drivers/gpu/drm/radeon/radeon_gem.c
-@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
+@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_f
        return 0;
  }
  
@@ -617,11 +553,9 @@ index aa796031ab65..dce99c8a5835 100644
  #if defined(CONFIG_DEBUG_FS)
  static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
  {
-diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
-index f6e0b5395051..946bd28bf5da 100644
 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
 +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
-@@ -285,7 +285,7 @@ static struct drm_driver shmob_drm_driver = {
+@@ -285,7 +285,7 @@ static struct drm_driver shmob_drm_drive
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
@@ -630,11 +564,9 @@ index f6e0b5395051..946bd28bf5da 100644
        .fops                   = &shmob_drm_fops,
        .name                   = "shmob-drm",
        .desc                   = "Renesas SH Mobile DRM",
-diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
-index 2b5461bcd9fb..bba8daf9230c 100644
 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
 +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
-@@ -490,7 +490,7 @@ static struct drm_driver tilcdc_driver = {
+@@ -490,7 +490,7 @@ static struct drm_driver tilcdc_driver =
        .gem_vm_ops         = &drm_gem_cma_vm_ops,
        .dumb_create        = drm_gem_cma_dumb_create,
        .dumb_map_offset    = drm_gem_cma_dumb_map_offset,
@@ -643,8 +575,6 @@ index 2b5461bcd9fb..bba8daf9230c 100644
  #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = tilcdc_debugfs_init,
        .debugfs_cleanup    = tilcdc_debugfs_cleanup,
-diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
-index c0770dbba74a..bb0af58c769a 100644
 --- a/drivers/gpu/drm/udl/udl_drv.c
 +++ b/drivers/gpu/drm/udl/udl_drv.c
 @@ -84,7 +84,7 @@ static struct drm_driver driver = {
@@ -656,11 +586,9 @@ index c0770dbba74a..bb0af58c769a 100644
        .fops = &udl_driver_fops,
  
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
-index cc6d90f28c71..56aec9409fa3 100644
 --- a/drivers/gpu/drm/udl/udl_drv.h
 +++ b/drivers/gpu/drm/udl/udl_drv.h
-@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
+@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *fil
                    struct drm_mode_create_dumb *args);
  int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
                 uint32_t handle, uint64_t *offset);
@@ -669,11 +597,9 @@ index cc6d90f28c71..56aec9409fa3 100644
  
  int udl_gem_init_object(struct drm_gem_object *obj);
  void udl_gem_free_object(struct drm_gem_object *gem_obj);
-diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
-index 2a4cb2f83b36..b5e3b8038253 100644
 --- a/drivers/gpu/drm/udl/udl_gem.c
 +++ b/drivers/gpu/drm/udl/udl_gem.c
-@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
+@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *fil
                              args->size, &args->handle);
  }
  
@@ -686,8 +612,6 @@ index 2a4cb2f83b36..b5e3b8038253 100644
  int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
  {
        int ret;
-diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
-index 2b561c9118c6..da15a6291bb9 100644
 --- a/drivers/gpu/host1x/drm/drm.c
 +++ b/drivers/gpu/host1x/drm/drm.c
 @@ -625,7 +625,7 @@ struct drm_driver tegra_drm_driver = {
@@ -699,11 +623,9 @@ index 2b561c9118c6..da15a6291bb9 100644
  
        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
-diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
-index bc323b3dbe4d..3c35622c9f15 100644
 --- a/drivers/gpu/host1x/drm/gem.c
 +++ b/drivers/gpu/host1x/drm/gem.c
-@@ -261,9 +261,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+@@ -261,9 +261,3 @@ int tegra_drm_mmap(struct file *file, st
  
        return ret;
  }
@@ -713,11 +635,9 @@ index bc323b3dbe4d..3c35622c9f15 100644
 -{
 -      return drm_gem_handle_delete(file, handle);
 -}
-diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
-index 34de2b486eb7..2e93b0379da8 100644
 --- a/drivers/gpu/host1x/drm/gem.h
 +++ b/drivers/gpu/host1x/drm/gem.h
-@@ -49,8 +49,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+@@ -49,8 +49,6 @@ int tegra_bo_dumb_create(struct drm_file
                         struct drm_mode_create_dumb *args);
  int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
                             uint32_t handle, uint64_t *offset);
@@ -726,11 +646,9 @@ index 34de2b486eb7..2e93b0379da8 100644
  
  int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
  
-diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
-index a532ca568526..a18622570812 100644
 --- a/drivers/staging/imx-drm/imx-drm-core.c
 +++ b/drivers/staging/imx-drm/imx-drm-core.c
-@@ -801,7 +801,7 @@ static struct drm_driver imx_drm_driver = {
+@@ -801,7 +801,7 @@ static struct drm_driver imx_drm_driver
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
@@ -739,11 +657,9 @@ index a532ca568526..a18622570812 100644
  
        .get_vblank_counter     = drm_vblank_count,
        .enable_vblank          = imx_drm_enable_vblank,
-diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index bf677c0b4cae..9a8ea57e3b94 100644
 --- a/include/drm/drmP.h
 +++ b/include/drm/drmP.h
-@@ -1589,6 +1589,9 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **
+@@ -1589,6 +1589,9 @@ extern int drm_prime_sg_to_page_addr_arr
  extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
  extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
  
@@ -753,11 +669,9 @@ index bf677c0b4cae..9a8ea57e3b94 100644
  
  void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
  void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
-index 63397ced9254..632a6c50fab7 100644
 --- a/include/drm/drm_gem_cma_helper.h
 +++ b/include/drm/drm_gem_cma_helper.h
-@@ -27,14 +27,6 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+@@ -30,14 +30,6 @@ int drm_gem_cma_dumb_map_offset(struct d
  /* set vm_flags and we can change the vm attribute to other one at here. */
  int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
  
@@ -772,6 +686,3 @@ index 63397ced9254..632a6c50fab7 100644
  /* allocate physical memory. */
  struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
                unsigned int size);
--- 
-1.8.5.rc3
-
diff --git a/patches.baytrail/1123-drivers-i2c-busses-don-t-check-resource-with-devm_io.patch b/patches.baytrail/1123-drivers-i2c-busses-don-t-check-resource-with-devm_io.patch
index 211975ee8331837ed12eaa0e08905426212bd42c..4921e23ef0a2f457cb7479e557c9a9f13f43c101 100644
@@ -10,18 +10,16 @@ Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
 (cherry picked from commit 3cc2d009bc210516c61536273b304c4f6ccd797c)
 Signed-off-by: Darren Hart <dvhart@linux.intel.com>
 ---
- drivers/i2c/busses/i2c-davinci.c            | 8 +-------
- drivers/i2c/busses/i2c-designware-platdrv.c | 8 +-------
- drivers/i2c/busses/i2c-imx.c                | 6 +-----
- drivers/i2c/busses/i2c-omap.c               | 8 +-------
- drivers/i2c/busses/i2c-rcar.c               | 7 +------
+ drivers/i2c/busses/i2c-davinci.c            |    8 +-------
+ drivers/i2c/busses/i2c-designware-platdrv.c |    8 +-------
+ drivers/i2c/busses/i2c-imx.c                |    6 +-----
+ drivers/i2c/busses/i2c-omap.c               |    8 +-------
+ drivers/i2c/busses/i2c-rcar.c               |    7 +------
  5 files changed, 5 insertions(+), 32 deletions(-)
 
-diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
-index cf20e06a88e1..fa556057d224 100644
 --- a/drivers/i2c/busses/i2c-davinci.c
 +++ b/drivers/i2c/busses/i2c-davinci.c
-@@ -646,13 +646,6 @@ static int davinci_i2c_probe(struct platform_device *pdev)
+@@ -646,13 +646,6 @@ static int davinci_i2c_probe(struct plat
        struct resource *mem, *irq;
        int r;
  
@@ -35,7 +33,7 @@ index cf20e06a88e1..fa556057d224 100644
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!irq) {
                dev_err(&pdev->dev, "no irq resource?\n");
-@@ -697,6 +690,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
+@@ -697,6 +690,7 @@ static int davinci_i2c_probe(struct plat
                return -ENODEV;
        clk_prepare_enable(dev->clk);
  
@@ -43,11 +41,9 @@ index cf20e06a88e1..fa556057d224 100644
        dev->base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(dev->base)) {
                r = PTR_ERR(dev->base);
-diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
-index 35b70a1edf57..ee46c92d7e3c 100644
 --- a/drivers/i2c/busses/i2c-designware-platdrv.c
 +++ b/drivers/i2c/busses/i2c-designware-platdrv.c
-@@ -87,13 +87,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
+@@ -87,13 +87,6 @@ static int dw_i2c_probe(struct platform_
        struct resource *mem;
        int irq, r;
  
@@ -61,7 +57,7 @@ index 35b70a1edf57..ee46c92d7e3c 100644
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no irq resource?\n");
-@@ -104,6 +97,7 @@ static int dw_i2c_probe(struct platform_device *pdev)
+@@ -104,6 +97,7 @@ static int dw_i2c_probe(struct platform_
        if (!dev)
                return -ENOMEM;
  
@@ -69,11 +65,9 @@ index 35b70a1edf57..ee46c92d7e3c 100644
        dev->base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(dev->base))
                return PTR_ERR(dev->base);
-diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
-index 8c7526ca912e..6406aa960f2a 100644
 --- a/drivers/i2c/busses/i2c-imx.c
 +++ b/drivers/i2c/busses/i2c-imx.c
-@@ -498,17 +498,13 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
+@@ -498,17 +498,13 @@ static int __init i2c_imx_probe(struct p
  
        dev_dbg(&pdev->dev, "<%s>\n", __func__);
  
@@ -92,11 +86,9 @@ index 8c7526ca912e..6406aa960f2a 100644
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
-diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
-index b06be8e3bb76..aa77626f8315 100644
 --- a/drivers/i2c/busses/i2c-omap.c
 +++ b/drivers/i2c/busses/i2c-omap.c
-@@ -1087,13 +1087,6 @@ omap_i2c_probe(struct platform_device *pdev)
+@@ -1087,13 +1087,6 @@ omap_i2c_probe(struct platform_device *p
        u32 rev;
        u16 minor, major, scheme;
  
@@ -110,7 +102,7 @@ index b06be8e3bb76..aa77626f8315 100644
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no irq resource?\n");
-@@ -1106,6 +1099,7 @@ omap_i2c_probe(struct platform_device *pdev)
+@@ -1106,6 +1099,7 @@ omap_i2c_probe(struct platform_device *p
                return -ENOMEM;
        }
  
@@ -118,11 +110,9 @@ index b06be8e3bb76..aa77626f8315 100644
        dev->base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(dev->base))
                return PTR_ERR(dev->base);
-diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
-index 4ba4a95b6b26..0fc585861610 100644
 --- a/drivers/i2c/busses/i2c-rcar.c
 +++ b/drivers/i2c/busses/i2c-rcar.c
-@@ -623,12 +623,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+@@ -658,12 +658,6 @@ static int rcar_i2c_probe(struct platfor
        u32 bus_speed;
        int ret;
  
@@ -135,7 +125,7 @@ index 4ba4a95b6b26..0fc585861610 100644
        priv = devm_kzalloc(dev, sizeof(struct rcar_i2c_priv), GFP_KERNEL);
        if (!priv) {
                dev_err(dev, "no mem for private data\n");
-@@ -642,6 +636,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+@@ -685,6 +679,7 @@ static int rcar_i2c_probe(struct platfor
        if (ret < 0)
                return ret;
  
@@ -143,6 +133,3 @@ index 4ba4a95b6b26..0fc585861610 100644
        priv->io = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->io))
                return PTR_ERR(priv->io);
--- 
-1.8.5.rc3
-
diff --git a/patches.baytrail/1137-dma-move-dw_dmac-driver-to-an-own-directory.patch b/patches.baytrail/1137-dma-move-dw_dmac-driver-to-an-own-directory.patch
index 2038b356f83f418027f8df60f03a35979e9d3238..a08d341a2087a13e5adf45492f760b0bc56ce478 100644
@@ -20,17 +20,24 @@ Signed-off-by: Darren Hart <dvhart@linux.intel.com>
  drivers/dma/dw/Makefile             |  1 +
  drivers/dma/{ => dw}/dw_dmac.c      |  2 +-
  drivers/dma/{ => dw}/dw_dmac_regs.h |  0
- 7 files changed, 28 insertions(+), 23 deletions(-)
+ MAINTAINERS                   |    3 
+ drivers/dma/Kconfig           |   20 
+ drivers/dma/Makefile          |    2 
+ drivers/dma/dw/Kconfig        |   23 
+ drivers/dma/dw/Makefile       |    1 
+ drivers/dma/dw/dw_dmac.c      | 1969 ++++++++++++++++++++++++++++++++++++++++++
+ drivers/dma/dw/dw_dmac_regs.h |  311 ++++++
+ drivers/dma/dw_dmac.c         | 1969 ------------------------------------------
+ drivers/dma/dw_dmac_regs.h    |  311 ------
+ 9 files changed, 2307 insertions(+), 2302 deletions(-)
  create mode 100644 drivers/dma/dw/Kconfig
  create mode 100644 drivers/dma/dw/Makefile
  rename drivers/dma/{ => dw}/dw_dmac.c (99%)
  rename drivers/dma/{ => dw}/dw_dmac_regs.h (100%)
 
-diff --git a/MAINTAINERS b/MAINTAINERS
-index 30287b8a223a..124d32cae616 100644
 --- a/MAINTAINERS
 +++ b/MAINTAINERS
-@@ -6991,8 +6991,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
+@@ -6998,8 +6998,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
  M:    Viresh Kumar <viresh.linux@gmail.com>
  S:    Maintained
  F:    include/linux/dw_dmac.h
@@ -40,8 +47,6 @@ index 30287b8a223a..124d32cae616 100644
  
  SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
  M:    Seungwon Jeon <tgih.jun@samsung.com>
-diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
-index e9924898043a..146a1d864a71 100644
 --- a/drivers/dma/Kconfig
 +++ b/drivers/dma/Kconfig
 @@ -79,25 +79,7 @@ config INTEL_IOP_ADMA
@@ -71,8 +76,6 @@ index e9924898043a..146a1d864a71 100644
  
  config AT_HDMAC
        tristate "Atmel AHB DMA support"
-diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
-index a2b0df591f95..ac44ca0d468a 100644
 --- a/drivers/dma/Makefile
 +++ b/drivers/dma/Makefile
 @@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o
@@ -84,9 +87,6 @@ index a2b0df591f95..ac44ca0d468a 100644
  obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
  obj-$(CONFIG_MX3_IPU) += ipu/
  obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
-new file mode 100644
-index 000000000000..38a215af5ccc
 --- /dev/null
 +++ b/drivers/dma/dw/Kconfig
 @@ -0,0 +1,23 @@
@@ -113,34 +113,4579 @@ index 000000000000..38a215af5ccc
 +        like the Atmel AVR32 architecture.
 +
 +        If unsure, use the default setting.
-diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
-new file mode 100644
-index 000000000000..dd8d9936beef
 --- /dev/null
 +++ b/drivers/dma/dw/Makefile
 @@ -0,0 +1 @@
 +obj-$(CONFIG_DW_DMAC) += dw_dmac.o
-diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw/dw_dmac.c
-similarity index 99%
-rename from drivers/dma/dw_dmac.c
-rename to drivers/dma/dw/dw_dmac.c
-index 2b65ba614e60..15f3f4f79c10 100644
---- a/drivers/dma/dw_dmac.c
+--- /dev/null
 +++ b/drivers/dma/dw/dw_dmac.c
-@@ -28,8 +28,8 @@
- #include <linux/acpi.h>
- #include <linux/acpi_dma.h>
+@@ -0,0 +1,1969 @@
++/*
++ * Core driver for the Synopsys DesignWare DMA Controller
++ *
++ * Copyright (C) 2007-2008 Atmel Corporation
++ * Copyright (C) 2010-2011 ST Microelectronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/bitops.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_dma.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/acpi.h>
++#include <linux/acpi_dma.h>
++
 +#include "../dmaengine.h"
- #include "dw_dmac_regs.h"
--#include "dmaengine.h"
- /*
-  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
-diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw/dw_dmac_regs.h
-similarity index 100%
-rename from drivers/dma/dw_dmac_regs.h
-rename to drivers/dma/dw/dw_dmac_regs.h
--- 
-1.8.5.rc3
-
++#include "dw_dmac_regs.h"
++
++/*
++ * This supports the Synopsys "DesignWare AHB Central DMA Controller",
++ * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
++ * of which use ARM any more).  See the "Databook" from Synopsys for
++ * information beyond what licensees probably provide.
++ *
++ * The driver has currently been tested only with the Atmel AT32AP7000,
++ * which does not support descriptor writeback.
++ */
++
++static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
++{
++      return slave ? slave->dst_master : 0;
++}
++
++static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
++{
++      return slave ? slave->src_master : 1;
++}
++
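++/*
++ * A request line of ~0 means none has been assigned yet (via
++ * DMA_SLAVE_CONFIG or an OF/ACPI xlate), so fall back to the slave
++ * data, if any, clamped to the highest available master.
++ */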
++static inline void dwc_set_masters(struct dw_dma_chan *dwc)
++{
++      struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++      struct dw_dma_slave *dws = dwc->chan.private;
++      unsigned char mmax = dw->nr_masters - 1;
++
++      if (dwc->request_line == ~0) {
++              dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
++              dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
++      }
++}
++
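++/*
++ * Default CTL_LO value for a channel: take the burst sizes from the
++ * slave config for peripheral transfers (or default to 16 for
++ * memory-to-memory), enable LLP updates on both ends and fill in the
++ * source/destination masters.
++ */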
++#define DWC_DEFAULT_CTLLO(_chan) ({                           \
++              struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
++              struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
++              bool _is_slave = is_slave_direction(_dwc->direction);   \
++              u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
++                      DW_DMA_MSIZE_16;                        \
++              u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
++                      DW_DMA_MSIZE_16;                        \
++                                                              \
++              (DWC_CTLL_DST_MSIZE(_dmsize)                    \
++               | DWC_CTLL_SRC_MSIZE(_smsize)                  \
++               | DWC_CTLL_LLP_D_EN                            \
++               | DWC_CTLL_LLP_S_EN                            \
++               | DWC_CTLL_DMS(_dwc->dst_master)               \
++               | DWC_CTLL_SMS(_dwc->src_master));             \
++      })
++
++/*
++ * Number of descriptors to allocate for each channel. This should be
++ * made configurable somehow; preferably, the clients (at least the
++ * ones using slave transfers) should be able to give us a hint.
++ */
++#define NR_DESCS_PER_CHANNEL  64
++
++/*----------------------------------------------------------------------*/
++
++static struct device *chan2dev(struct dma_chan *chan)
++{
++      return &chan->dev->device;
++}
++static struct device *chan2parent(struct dma_chan *chan)
++{
++      return chan->dev->device.parent;
++}
++
++static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
++{
++      return to_dw_desc(dwc->active_list.next);
++}
++
++static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
++{
++      struct dw_desc *desc, *_desc;
++      struct dw_desc *ret = NULL;
++      unsigned int i = 0;
++      unsigned long flags;
++
++      spin_lock_irqsave(&dwc->lock, flags);
++      list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
++              i++;
++              if (async_tx_test_ack(&desc->txd)) {
++                      list_del(&desc->desc_node);
++                      ret = desc;
++                      break;
++              }
++              dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
++      }
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
++
++      return ret;
++}
++
++/*
++ * Move a descriptor, including any children, to the free list.
++ * `desc' must not be on any lists.
++ */
++static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
++{
++      unsigned long flags;
++
++      if (desc) {
++              struct dw_desc *child;
++
++              spin_lock_irqsave(&dwc->lock, flags);
++              list_for_each_entry(child, &desc->tx_list, desc_node)
++                      dev_vdbg(chan2dev(&dwc->chan),
++                                      "moving child desc %p to freelist\n",
++                                      child);
++              list_splice_init(&desc->tx_list, &dwc->free_list);
++              dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
++              list_add(&desc->desc_node, &dwc->free_list);
++              spin_unlock_irqrestore(&dwc->lock, flags);
++      }
++}
++
++static void dwc_initialize(struct dw_dma_chan *dwc)
++{
++      struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++      struct dw_dma_slave *dws = dwc->chan.private;
++      u32 cfghi = DWC_CFGH_FIFO_MODE;
++      u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
++
++      if (dwc->initialized)
++              return;
++
++      if (dws) {
++              /*
++               * We need controller-specific data to set up slave
++               * transfers.
++               */
++              BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
++
++              cfghi = dws->cfg_hi;
++              cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
++      } else {
++              if (dwc->direction == DMA_MEM_TO_DEV)
++                      cfghi = DWC_CFGH_DST_PER(dwc->request_line);
++              else if (dwc->direction == DMA_DEV_TO_MEM)
++                      cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
++      }
++
++      channel_writel(dwc, CFG_LO, cfglo);
++      channel_writel(dwc, CFG_HI, cfghi);
++
++      /* Enable interrupts */
++      channel_set_bit(dw, MASK.XFER, dwc->mask);
++      channel_set_bit(dw, MASK.ERROR, dwc->mask);
++
++      dwc->initialized = true;
++}
++
++/*----------------------------------------------------------------------*/
++
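++/*
++ * Despite the "fls" in the name, this computes the number of trailing
++ * zero bits (capped at 3), i.e. the widest power-of-2 transfer width
++ * that all of the OR'ed operands are aligned to.
++ */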
++static inline unsigned int dwc_fast_fls(unsigned long long v)
++{
++      /*
++       * We can be a lot more clever here, but this should take care
++       * of the most common optimization.
++       */
++      if (!(v & 7))
++              return 3;
++      else if (!(v & 3))
++              return 2;
++      else if (!(v & 1))
++              return 1;
++      return 0;
++}
++
++static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
++{
++      dev_err(chan2dev(&dwc->chan),
++              "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
++              channel_readl(dwc, SAR),
++              channel_readl(dwc, DAR),
++              channel_readl(dwc, LLP),
++              channel_readl(dwc, CTL_HI),
++              channel_readl(dwc, CTL_LO));
++}
++
++static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
++{
++      channel_clear_bit(dw, CH_EN, dwc->mask);
++      while (dma_readl(dw, CH_EN) & dwc->mask)
++              cpu_relax();
++}
++
++/*----------------------------------------------------------------------*/
++
++/* Perform single block transfer */
++static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
++                                     struct dw_desc *desc)
++{
++      struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
++      u32             ctllo;
++
++      /* Software emulation of LLP mode relies on interrupts to continue
++       * multi block transfer. */
++      ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
++
++      channel_writel(dwc, SAR, desc->lli.sar);
++      channel_writel(dwc, DAR, desc->lli.dar);
++      channel_writel(dwc, CTL_LO, ctllo);
++      channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
++      channel_set_bit(dw, CH_EN, dwc->mask);
++
++      /* Move pointer to next descriptor */
++      dwc->tx_node_active = dwc->tx_node_active->next;
++}
++
++/* Called with dwc->lock held and bh disabled */
++static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
++{
++      struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
++      unsigned long   was_soft_llp;
++
++      /* ASSERT:  channel is idle */
++      if (dma_readl(dw, CH_EN) & dwc->mask) {
++              dev_err(chan2dev(&dwc->chan),
++                      "BUG: Attempted to start non-idle channel\n");
++              dwc_dump_chan_regs(dwc);
++
++              /* The tasklet will hopefully advance the queue... */
++              return;
++      }
++
++      if (dwc->nollp) {
++              was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
++                                              &dwc->flags);
++              if (was_soft_llp) {
++                      dev_err(chan2dev(&dwc->chan),
++                              "BUG: Attempted to start new LLP transfer "
++                              "inside ongoing one\n");
++                      return;
++              }
++
++              dwc_initialize(dwc);
++
++              dwc->residue = first->total_len;
++              dwc->tx_node_active = &first->tx_list;
++
++              /* Submit first block */
++              dwc_do_single_block(dwc, first);
++
++              return;
++      }
++
++      dwc_initialize(dwc);
++
++      channel_writel(dwc, LLP, first->txd.phys);
++      channel_writel(dwc, CTL_LO,
++                      DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
++      channel_writel(dwc, CTL_HI, 0);
++      channel_set_bit(dw, CH_EN, dwc->mask);
++}
++
++/*----------------------------------------------------------------------*/
++
++static void
++dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
++              bool callback_required)
++{
++      dma_async_tx_callback           callback = NULL;
++      void                            *param = NULL;
++      struct dma_async_tx_descriptor  *txd = &desc->txd;
++      struct dw_desc                  *child;
++      unsigned long                   flags;
++
++      dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
++
++      spin_lock_irqsave(&dwc->lock, flags);
++      dma_cookie_complete(txd);
++      if (callback_required) {
++              callback = txd->callback;
++              param = txd->callback_param;
++      }
++
++      /* async_tx_ack */
++      list_for_each_entry(child, &desc->tx_list, desc_node)
++              async_tx_ack(&child->txd);
++      async_tx_ack(&desc->txd);
++
++      list_splice_init(&desc->tx_list, &dwc->free_list);
++      list_move(&desc->desc_node, &dwc->free_list);
++
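++      /*
++       * Unmap only for memory-to-memory transfers; for slave
++       * transfers the client owns the device-side mapping.
++       */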
++      if (!is_slave_direction(dwc->direction)) {
++              struct device *parent = chan2parent(&dwc->chan);
++              if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
++                      if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
++                              dma_unmap_single(parent, desc->lli.dar,
++                                      desc->total_len, DMA_FROM_DEVICE);
++                      else
++                              dma_unmap_page(parent, desc->lli.dar,
++                                      desc->total_len, DMA_FROM_DEVICE);
++              }
++              if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
++                      if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
++                              dma_unmap_single(parent, desc->lli.sar,
++                                      desc->total_len, DMA_TO_DEVICE);
++                      else
++                              dma_unmap_page(parent, desc->lli.sar,
++                                      desc->total_len, DMA_TO_DEVICE);
++              }
++      }
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      if (callback)
++              callback(param);
++}
++
++static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
++{
++      struct dw_desc *desc, *_desc;
++      LIST_HEAD(list);
++      unsigned long flags;
++
++      spin_lock_irqsave(&dwc->lock, flags);
++      if (dma_readl(dw, CH_EN) & dwc->mask) {
++              dev_err(chan2dev(&dwc->chan),
++                      "BUG: XFER bit set, but channel not idle!\n");
++
++              /* Try to continue after resetting the channel... */
++              dwc_chan_disable(dw, dwc);
++      }
++
++      /*
++       * Submit queued descriptors ASAP, i.e. before we go through
++       * the completed ones.
++       */
++      list_splice_init(&dwc->active_list, &list);
++      if (!list_empty(&dwc->queue)) {
++              list_move(dwc->queue.next, &dwc->active_list);
++              dwc_dostart(dwc, dwc_first_active(dwc));
++      }
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      list_for_each_entry_safe(desc, _desc, &list, desc_node)
++              dwc_descriptor_complete(dwc, desc, true);
++}
++
++/* Returns how many bytes have been transferred from the source so far */
++static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
++{
++      u32 ctlhi = channel_readl(dwc, CTL_HI);
++      u32 ctllo = channel_readl(dwc, CTL_LO);
++
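++      /* bytes = BLOCK_TS * 2^SRC_TR_WIDTH (CTL_LO bits 6:4) */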
++      return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
++}
++
++static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
++{
++      dma_addr_t llp;
++      struct dw_desc *desc, *_desc;
++      struct dw_desc *child;
++      u32 status_xfer;
++      unsigned long flags;
++
++      spin_lock_irqsave(&dwc->lock, flags);
++      llp = channel_readl(dwc, LLP);
++      status_xfer = dma_readl(dw, RAW.XFER);
++
++      if (status_xfer & dwc->mask) {
++              /* Everything we've submitted is done */
++              dma_writel(dw, CLEAR.XFER, dwc->mask);
++
++              if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
++                      struct list_head *head, *active = dwc->tx_node_active;
++
++                      /*
++                       * We are inside first active descriptor.
++                       * Otherwise something is really wrong.
++                       */
++                      desc = dwc_first_active(dwc);
++
++                      head = &desc->tx_list;
++                      if (active != head) {
++                              /* Update desc to reflect last sent one */
++                              if (active != head->next)
++                                      desc = to_dw_desc(active->prev);
++
++                              dwc->residue -= desc->len;
++
++                              child = to_dw_desc(active);
++
++                              /* Submit next block */
++                              dwc_do_single_block(dwc, child);
++
++                              spin_unlock_irqrestore(&dwc->lock, flags);
++                              return;
++                      }
++
++                      /* We are done here */
++                      clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
++              }
++
++              dwc->residue = 0;
++
++              spin_unlock_irqrestore(&dwc->lock, flags);
++
++              dwc_complete_all(dw, dwc);
++              return;
++      }
++
++      if (list_empty(&dwc->active_list)) {
++              dwc->residue = 0;
++              spin_unlock_irqrestore(&dwc->lock, flags);
++              return;
++      }
++
++      if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
++              dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
++              spin_unlock_irqrestore(&dwc->lock, flags);
++              return;
++      }
++
++      dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
++                      (unsigned long long)llp);
++
++      list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
++              /* Initial residue value */
++              dwc->residue = desc->total_len;
++
++              /* Check first descriptors addr */
++              if (desc->txd.phys == llp) {
++                      spin_unlock_irqrestore(&dwc->lock, flags);
++                      return;
++              }
++
++              /* Check first descriptors llp */
++              if (desc->lli.llp == llp) {
++                      /* This one is currently in progress */
++                      dwc->residue -= dwc_get_sent(dwc);
++                      spin_unlock_irqrestore(&dwc->lock, flags);
++                      return;
++              }
++
++              dwc->residue -= desc->len;
++              list_for_each_entry(child, &desc->tx_list, desc_node) {
++                      if (child->lli.llp == llp) {
++                              /* Currently in progress */
++                              dwc->residue -= dwc_get_sent(dwc);
++                              spin_unlock_irqrestore(&dwc->lock, flags);
++                              return;
++                      }
++                      dwc->residue -= child->len;
++              }
++
++              /*
++               * No descriptors so far seem to be in progress, i.e.
++               * this one must be done.
++               */
++              spin_unlock_irqrestore(&dwc->lock, flags);
++              dwc_descriptor_complete(dwc, desc, true);
++              spin_lock_irqsave(&dwc->lock, flags);
++      }
++
++      dev_err(chan2dev(&dwc->chan),
++              "BUG: All descriptors done, but channel not idle!\n");
++
++      /* Try to continue after resetting the channel... */
++      dwc_chan_disable(dw, dwc);
++
++      if (!list_empty(&dwc->queue)) {
++              list_move(dwc->queue.next, &dwc->active_list);
++              dwc_dostart(dwc, dwc_first_active(dwc));
++      }
++      spin_unlock_irqrestore(&dwc->lock, flags);
++}
++
++static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
++{
++      dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
++               lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
++}
++
++static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
++{
++      struct dw_desc *bad_desc;
++      struct dw_desc *child;
++      unsigned long flags;
++
++      dwc_scan_descriptors(dw, dwc);
++
++      spin_lock_irqsave(&dwc->lock, flags);
++
++      /*
++       * The descriptor currently at the head of the active list is
++       * borked. Since we don't have any way to report errors, we'll
++       * just have to scream loudly and try to carry on.
++       */
++      bad_desc = dwc_first_active(dwc);
++      list_del_init(&bad_desc->desc_node);
++      list_move(dwc->queue.next, dwc->active_list.prev);
++
++      /* Clear the error flag and try to restart the controller */
++      dma_writel(dw, CLEAR.ERROR, dwc->mask);
++      if (!list_empty(&dwc->active_list))
++              dwc_dostart(dwc, dwc_first_active(dwc));
++
++      /*
++       * WARN may seem harsh, but since this only happens
++       * when someone submits a bad physical address in a
++       * descriptor, we should consider ourselves lucky that the
++       * controller flagged an error instead of scribbling over
++       * random memory locations.
++       */
++      dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
++                                     "  cookie: %d\n", bad_desc->txd.cookie);
++      dwc_dump_lli(dwc, &bad_desc->lli);
++      list_for_each_entry(child, &bad_desc->tx_list, desc_node)
++              dwc_dump_lli(dwc, &child->lli);
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      /* Pretend the descriptor completed successfully */
++      dwc_descriptor_complete(dwc, bad_desc, true);
++}
++
++/* --------------------- Cyclic DMA API extensions -------------------- */
++
++dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
++{
++      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++      return channel_readl(dwc, SAR);
++}
++EXPORT_SYMBOL(dw_dma_get_src_addr);
++
++dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
++{
++      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++      return channel_readl(dwc, DAR);
++}
++EXPORT_SYMBOL(dw_dma_get_dst_addr);
++
++/* Called with dwc->lock held and all DMAC interrupts disabled */
++static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
++              u32 status_err, u32 status_xfer)
++{
++      unsigned long flags;
++
++      if (dwc->mask) {
++              void (*callback)(void *param);
++              void *callback_param;
++
++              dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
++                              channel_readl(dwc, LLP));
++
++              callback = dwc->cdesc->period_callback;
++              callback_param = dwc->cdesc->period_callback_param;
++
++              if (callback)
++                      callback(callback_param);
++      }
++
++      /*
++       * Error and transfer complete are highly unlikely, and will most
++       * likely be due to a configuration error by the user.
++       */
++      if (unlikely(status_err & dwc->mask) ||
++                      unlikely(status_xfer & dwc->mask)) {
++              int i;
++
++              dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
++                              "interrupt, stopping DMA transfer\n",
++                              status_xfer & dwc->mask ? "xfer" : "error");
++
++              spin_lock_irqsave(&dwc->lock, flags);
++
++              dwc_dump_chan_regs(dwc);
++
++              dwc_chan_disable(dw, dwc);
++
++              /* Make sure DMA does not restart by loading a new list */
++              channel_writel(dwc, LLP, 0);
++              channel_writel(dwc, CTL_LO, 0);
++              channel_writel(dwc, CTL_HI, 0);
++
++              dma_writel(dw, CLEAR.ERROR, dwc->mask);
++              dma_writel(dw, CLEAR.XFER, dwc->mask);
++
++              for (i = 0; i < dwc->cdesc->periods; i++)
++                      dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
++
++              spin_unlock_irqrestore(&dwc->lock, flags);
++      }
++}
++
++/* ------------------------------------------------------------------------- */
++
++static void dw_dma_tasklet(unsigned long data)
++{
++      struct dw_dma *dw = (struct dw_dma *)data;
++      struct dw_dma_chan *dwc;
++      u32 status_xfer;
++      u32 status_err;
++      int i;
++
++      status_xfer = dma_readl(dw, RAW.XFER);
++      status_err = dma_readl(dw, RAW.ERROR);
++
++      dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
++
++      for (i = 0; i < dw->dma.chancnt; i++) {
++              dwc = &dw->chan[i];
++              if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
++                      dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
++              else if (status_err & (1 << i))
++                      dwc_handle_error(dw, dwc);
++              else if (status_xfer & (1 << i))
++                      dwc_scan_descriptors(dw, dwc);
++      }
++
++      /*
++       * Re-enable interrupts.
++       */
++      channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
++      channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
++}
++
++static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
++{
++      struct dw_dma *dw = dev_id;
++      u32 status;
++
++      dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
++                      dma_readl(dw, STATUS_INT));
++
++      /*
++       * Just disable the interrupts. We'll turn them back on in the
++       * softirq handler.
++       */
++      channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++      channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
++
++      status = dma_readl(dw, STATUS_INT);
++      if (status) {
++              dev_err(dw->dma.dev,
++                      "BUG: Unexpected interrupts pending: 0x%x\n",
++                      status);
++
++              /* Try to recover: mask all sources on the 8 possible channels */
++              channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
++              channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
++              channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
++              channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
++      }
++
++      tasklet_schedule(&dw->tasklet);
++
++      return IRQ_HANDLED;
++}
++
++/*----------------------------------------------------------------------*/
++
++static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++      struct dw_desc          *desc = txd_to_dw_desc(tx);
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
++      dma_cookie_t            cookie;
++      unsigned long           flags;
++
++      spin_lock_irqsave(&dwc->lock, flags);
++      cookie = dma_cookie_assign(tx);
++
++      /*
++       * REVISIT: We should attempt to chain as many descriptors as
++       * possible, perhaps even appending to those already submitted
++       * for DMA. But this is hard to do in a race-free manner.
++       */
++      if (list_empty(&dwc->active_list)) {
++              dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
++                              desc->txd.cookie);
++              list_add_tail(&desc->desc_node, &dwc->active_list);
++              dwc_dostart(dwc, dwc_first_active(dwc));
++      } else {
++              dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
++                              desc->txd.cookie);
++
++              list_add_tail(&desc->desc_node, &dwc->queue);
++      }
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      return cookie;
++}
++
++static struct dma_async_tx_descriptor *
++dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
++              size_t len, unsigned long flags)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      struct dw_dma           *dw = to_dw_dma(chan->device);
++      struct dw_desc          *desc;
++      struct dw_desc          *first;
++      struct dw_desc          *prev;
++      size_t                  xfer_count;
++      size_t                  offset;
++      unsigned int            src_width;
++      unsigned int            dst_width;
++      unsigned int            data_width;
++      u32                     ctllo;
++
++      dev_vdbg(chan2dev(chan),
++                      "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
++                      (unsigned long long)dest, (unsigned long long)src,
++                      len, flags);
++
++      if (unlikely(!len)) {
++              dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
++              return NULL;
++      }
++
++      dwc->direction = DMA_MEM_TO_MEM;
++
++      data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
++                         dw->data_width[dwc->dst_master]);
++
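++      /* Use the widest transfer that both masters and all addresses allow */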
++      src_width = dst_width = min_t(unsigned int, data_width,
++                                    dwc_fast_fls(src | dest | len));
++
++      ctllo = DWC_DEFAULT_CTLLO(chan)
++                      | DWC_CTLL_DST_WIDTH(dst_width)
++                      | DWC_CTLL_SRC_WIDTH(src_width)
++                      | DWC_CTLL_DST_INC
++                      | DWC_CTLL_SRC_INC
++                      | DWC_CTLL_FC_M2M;
++      prev = first = NULL;
++
++      for (offset = 0; offset < len; offset += xfer_count << src_width) {
++              xfer_count = min_t(size_t, (len - offset) >> src_width,
++                                         dwc->block_size);
++
++              desc = dwc_desc_get(dwc);
++              if (!desc)
++                      goto err_desc_get;
++
++              desc->lli.sar = src + offset;
++              desc->lli.dar = dest + offset;
++              desc->lli.ctllo = ctllo;
++              desc->lli.ctlhi = xfer_count;
++              desc->len = xfer_count << src_width;
++
++              if (!first) {
++                      first = desc;
++              } else {
++                      prev->lli.llp = desc->txd.phys;
++                      list_add_tail(&desc->desc_node,
++                                      &first->tx_list);
++              }
++              prev = desc;
++      }
++
++      if (flags & DMA_PREP_INTERRUPT)
++              /* Trigger interrupt after last block */
++              prev->lli.ctllo |= DWC_CTLL_INT_EN;
++
++      prev->lli.llp = 0;
++      first->txd.flags = flags;
++      first->total_len = len;
++
++      return &first->txd;
++
++err_desc_get:
++      dwc_desc_put(dwc, first);
++      return NULL;
++}
++
++static struct dma_async_tx_descriptor *
++dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
++              unsigned int sg_len, enum dma_transfer_direction direction,
++              unsigned long flags, void *context)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      struct dw_dma           *dw = to_dw_dma(chan->device);
++      struct dma_slave_config *sconfig = &dwc->dma_sconfig;
++      struct dw_desc          *prev;
++      struct dw_desc          *first;
++      u32                     ctllo;
++      dma_addr_t              reg;
++      unsigned int            reg_width;
++      unsigned int            mem_width;
++      unsigned int            data_width;
++      unsigned int            i;
++      struct scatterlist      *sg;
++      size_t                  total_len = 0;
++
++      dev_vdbg(chan2dev(chan), "%s\n", __func__);
++
++      if (unlikely(!is_slave_direction(direction) || !sg_len))
++              return NULL;
++
++      dwc->direction = direction;
++
++      prev = first = NULL;
++
++      switch (direction) {
++      case DMA_MEM_TO_DEV:
++              reg_width = __fls(sconfig->dst_addr_width);
++              reg = sconfig->dst_addr;
++              ctllo = (DWC_DEFAULT_CTLLO(chan)
++                              | DWC_CTLL_DST_WIDTH(reg_width)
++                              | DWC_CTLL_DST_FIX
++                              | DWC_CTLL_SRC_INC);
++
++              ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
++                      DWC_CTLL_FC(DW_DMA_FC_D_M2P);
++
++              data_width = dw->data_width[dwc->src_master];
++
++              for_each_sg(sgl, sg, sg_len, i) {
++                      struct dw_desc  *desc;
++                      u32             len, dlen, mem;
++
++                      mem = sg_dma_address(sg);
++                      len = sg_dma_len(sg);
++
++                      mem_width = min_t(unsigned int,
++                                        data_width, dwc_fast_fls(mem | len));
++
++slave_sg_todev_fill_desc:
++                      desc = dwc_desc_get(dwc);
++                      if (!desc) {
++                              dev_err(chan2dev(chan),
++                                      "not enough descriptors available\n");
++                              goto err_desc_get;
++                      }
++
++                      desc->lli.sar = mem;
++                      desc->lli.dar = reg;
++                      desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
++                      if ((len >> mem_width) > dwc->block_size) {
++                              dlen = dwc->block_size << mem_width;
++                              mem += dlen;
++                              len -= dlen;
++                      } else {
++                              dlen = len;
++                              len = 0;
++                      }
++
++                      desc->lli.ctlhi = dlen >> mem_width;
++                      desc->len = dlen;
++
++                      if (!first) {
++                              first = desc;
++                      } else {
++                              prev->lli.llp = desc->txd.phys;
++                              list_add_tail(&desc->desc_node,
++                                              &first->tx_list);
++                      }
++                      prev = desc;
++                      total_len += dlen;
++
++                      if (len)
++                              goto slave_sg_todev_fill_desc;
++              }
++              break;
++      case DMA_DEV_TO_MEM:
++              reg_width = __fls(sconfig->src_addr_width);
++              reg = sconfig->src_addr;
++              ctllo = (DWC_DEFAULT_CTLLO(chan)
++                              | DWC_CTLL_SRC_WIDTH(reg_width)
++                              | DWC_CTLL_DST_INC
++                              | DWC_CTLL_SRC_FIX);
++
++              ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
++                      DWC_CTLL_FC(DW_DMA_FC_D_P2M);
++
++              data_width = dw->data_width[dwc->dst_master];
++
++              for_each_sg(sgl, sg, sg_len, i) {
++                      struct dw_desc  *desc;
++                      u32             len, dlen, mem;
++
++                      mem = sg_dma_address(sg);
++                      len = sg_dma_len(sg);
++
++                      mem_width = min_t(unsigned int,
++                                        data_width, dwc_fast_fls(mem | len));
++
++slave_sg_fromdev_fill_desc:
++                      desc = dwc_desc_get(dwc);
++                      if (!desc) {
++                              dev_err(chan2dev(chan),
++                                              "not enough descriptors available\n");
++                              goto err_desc_get;
++                      }
++
++                      desc->lli.sar = reg;
++                      desc->lli.dar = mem;
++                      desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
++                      if ((len >> reg_width) > dwc->block_size) {
++                              dlen = dwc->block_size << reg_width;
++                              mem += dlen;
++                              len -= dlen;
++                      } else {
++                              dlen = len;
++                              len = 0;
++                      }
++                      desc->lli.ctlhi = dlen >> reg_width;
++                      desc->len = dlen;
++
++                      if (!first) {
++                              first = desc;
++                      } else {
++                              prev->lli.llp = desc->txd.phys;
++                              list_add_tail(&desc->desc_node,
++                                              &first->tx_list);
++                      }
++                      prev = desc;
++                      total_len += dlen;
++
++                      if (len)
++                              goto slave_sg_fromdev_fill_desc;
++              }
++              break;
++      default:
++              return NULL;
++      }
++
++      if (flags & DMA_PREP_INTERRUPT)
++              /* Trigger interrupt after last block */
++              prev->lli.ctllo |= DWC_CTLL_INT_EN;
++
++      prev->lli.llp = 0;
++      first->total_len = total_len;
++
++      return &first->txd;
++
++err_desc_get:
++      dwc_desc_put(dwc, first);
++      return NULL;
++}
++
++/*
++ * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
++ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
++ *
++ * NOTE: burst size 2 is not supported by controller.
++ *
++ * This is done by finding the most significant bit set: fls(n) - 2,
++ * with sizes of 1 or less mapping to 0.
++ */
++static inline void convert_burst(u32 *maxburst)
++{
++      if (*maxburst > 1)
++              *maxburst = fls(*maxburst) - 2;
++      else
++              *maxburst = 0;
++}
++
++static int
++set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
++{
++      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++
++      /* Check if chan will be configured for slave transfers */
++      if (!is_slave_direction(sconfig->direction))
++              return -EINVAL;
++
++      memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
++      dwc->direction = sconfig->direction;
++
++      /* Take the request line from slave_id member */
++      if (dwc->request_line == ~0)
++              dwc->request_line = sconfig->slave_id;
++
++      convert_burst(&dwc->dma_sconfig.src_maxburst);
++      convert_burst(&dwc->dma_sconfig.dst_maxburst);
++
++      return 0;
++}
++
++static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
++{
++      u32 cfglo = channel_readl(dwc, CFG_LO);
++      unsigned int count = 20;        /* timeout iterations */
++
++      channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
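++      /* Wait for the FIFO to drain, but give up after ~40us (20 * 2us) */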
++      while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
++              udelay(2);
++
++      dwc->paused = true;
++}
++
++static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
++{
++      u32 cfglo = channel_readl(dwc, CFG_LO);
++
++      channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
++
++      dwc->paused = false;
++}
++
++static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
++                     unsigned long arg)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      struct dw_dma           *dw = to_dw_dma(chan->device);
++      struct dw_desc          *desc, *_desc;
++      unsigned long           flags;
++      LIST_HEAD(list);
++
++      if (cmd == DMA_PAUSE) {
++              spin_lock_irqsave(&dwc->lock, flags);
++
++              dwc_chan_pause(dwc);
++
++              spin_unlock_irqrestore(&dwc->lock, flags);
++      } else if (cmd == DMA_RESUME) {
++              if (!dwc->paused)
++                      return 0;
++
++              spin_lock_irqsave(&dwc->lock, flags);
++
++              dwc_chan_resume(dwc);
++
++              spin_unlock_irqrestore(&dwc->lock, flags);
++      } else if (cmd == DMA_TERMINATE_ALL) {
++              spin_lock_irqsave(&dwc->lock, flags);
++
++              clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
++
++              dwc_chan_disable(dw, dwc);
++
++              dwc_chan_resume(dwc);
++
++              /* active_list entries will end up before queued entries */
++              list_splice_init(&dwc->queue, &list);
++              list_splice_init(&dwc->active_list, &list);
++
++              spin_unlock_irqrestore(&dwc->lock, flags);
++
++              /* Flush all pending and queued descriptors */
++              list_for_each_entry_safe(desc, _desc, &list, desc_node)
++                      dwc_descriptor_complete(dwc, desc, false);
++      } else if (cmd == DMA_SLAVE_CONFIG) {
++              return set_runtime_config(chan, (struct dma_slave_config *)arg);
++      } else {
++              return -ENXIO;
++      }
++
++      return 0;
++}
++
++static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
++{
++      unsigned long flags;
++      u32 residue;
++
++      spin_lock_irqsave(&dwc->lock, flags);
++
++      residue = dwc->residue;
++      if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
++              residue -= dwc_get_sent(dwc);
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++      return residue;
++}
++
++static enum dma_status
++dwc_tx_status(struct dma_chan *chan,
++            dma_cookie_t cookie,
++            struct dma_tx_state *txstate)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      enum dma_status         ret;
++
++      ret = dma_cookie_status(chan, cookie, txstate);
++      if (ret != DMA_SUCCESS) {
++              dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
++
++              ret = dma_cookie_status(chan, cookie, txstate);
++      }
++
++      if (ret != DMA_SUCCESS)
++              dma_set_residue(txstate, dwc_get_residue(dwc));
++
++      if (dwc->paused)
++              return DMA_PAUSED;
++
++      return ret;
++}
++
++static void dwc_issue_pending(struct dma_chan *chan)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++
++      if (!list_empty(&dwc->queue))
++              dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
++}
++
++static int dwc_alloc_chan_resources(struct dma_chan *chan)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      struct dw_dma           *dw = to_dw_dma(chan->device);
++      struct dw_desc          *desc;
++      int                     i;
++      unsigned long           flags;
++
++      dev_vdbg(chan2dev(chan), "%s\n", __func__);
++
++      /* ASSERT:  channel is idle */
++      if (dma_readl(dw, CH_EN) & dwc->mask) {
++              dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
++              return -EIO;
++      }
++
++      dma_cookie_init(chan);
++
++      /*
++       * NOTE: some controllers may have additional features that we
++       * need to initialize here, like "scatter-gather" (which
++       * doesn't mean what you think it means), and status writeback.
++       */
++
++      dwc_set_masters(dwc);
++
++      spin_lock_irqsave(&dwc->lock, flags);
++      i = dwc->descs_allocated;
++      while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
++              dma_addr_t phys;
++
++              spin_unlock_irqrestore(&dwc->lock, flags);
++
++              desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
++              if (!desc)
++                      goto err_desc_alloc;
++
++              memset(desc, 0, sizeof(struct dw_desc));
++
++              INIT_LIST_HEAD(&desc->tx_list);
++              dma_async_tx_descriptor_init(&desc->txd, chan);
++              desc->txd.tx_submit = dwc_tx_submit;
++              desc->txd.flags = DMA_CTRL_ACK;
++              desc->txd.phys = phys;
++
++              dwc_desc_put(dwc, desc);
++
++              spin_lock_irqsave(&dwc->lock, flags);
++              i = ++dwc->descs_allocated;
++      }
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
++
++      return i;
++
++err_desc_alloc:
++      dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
++
++      return i;
++}
++
++static void dwc_free_chan_resources(struct dma_chan *chan)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      struct dw_dma           *dw = to_dw_dma(chan->device);
++      struct dw_desc          *desc, *_desc;
++      unsigned long           flags;
++      LIST_HEAD(list);
++
++      dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
++                      dwc->descs_allocated);
++
++      /* ASSERT:  channel is idle */
++      BUG_ON(!list_empty(&dwc->active_list));
++      BUG_ON(!list_empty(&dwc->queue));
++      BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
++
++      spin_lock_irqsave(&dwc->lock, flags);
++      list_splice_init(&dwc->free_list, &list);
++      dwc->descs_allocated = 0;
++      dwc->initialized = false;
++      dwc->request_line = ~0;
++
++      /* Disable interrupts */
++      channel_clear_bit(dw, MASK.XFER, dwc->mask);
++      channel_clear_bit(dw, MASK.ERROR, dwc->mask);
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      list_for_each_entry_safe(desc, _desc, &list, desc_node) {
++              dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
++              dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
++      }
++
++      dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
++}
++
++/*----------------------------------------------------------------------*/
++
++struct dw_dma_of_filter_args {
++      struct dw_dma *dw;
++      unsigned int req;
++      unsigned int src;
++      unsigned int dst;
++};
++
++static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
++{
++      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++      struct dw_dma_of_filter_args *fargs = param;
++
++      /* Ensure the device matches our channel */
++      if (chan->device != &fargs->dw->dma)
++              return false;
++
++      dwc->request_line = fargs->req;
++      dwc->src_master = fargs->src;
++      dwc->dst_master = fargs->dst;
++
++      return true;
++}
++
++static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
++                                      struct of_dma *ofdma)
++{
++      struct dw_dma *dw = ofdma->of_dma_data;
++      struct dw_dma_of_filter_args fargs = {
++              .dw = dw,
++      };
++      dma_cap_mask_t cap;
++
++      if (dma_spec->args_count != 3)
++              return NULL;
++
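++      /* The three cells are: request line, source master, dest master */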
++      fargs.req = dma_spec->args[0];
++      fargs.src = dma_spec->args[1];
++      fargs.dst = dma_spec->args[2];
++
++      if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
++                  fargs.src >= dw->nr_masters ||
++                  fargs.dst >= dw->nr_masters))
++              return NULL;
++
++      dma_cap_zero(cap);
++      dma_cap_set(DMA_SLAVE, cap);
++
++      /* TODO: there should be a simpler way to do this */
++      return dma_request_channel(cap, dw_dma_of_filter, &fargs);
++}
++
++#ifdef CONFIG_ACPI
++static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
++{
++      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++      struct acpi_dma_spec *dma_spec = param;
++
++      if (chan->device->dev != dma_spec->dev ||
++          chan->chan_id != dma_spec->chan_id)
++              return false;
++
++      dwc->request_line = dma_spec->slave_id;
++      dwc->src_master = dwc_get_sms(NULL);
++      dwc->dst_master = dwc_get_dms(NULL);
++
++      return true;
++}
++
++static void dw_dma_acpi_controller_register(struct dw_dma *dw)
++{
++      struct device *dev = dw->dma.dev;
++      struct acpi_dma_filter_info *info;
++      int ret;
++
++      info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
++      if (!info)
++              return;
++
++      dma_cap_zero(info->dma_cap);
++      dma_cap_set(DMA_SLAVE, info->dma_cap);
++      info->filter_fn = dw_dma_acpi_filter;
++
++      ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
++                                              info);
++      if (ret)
++              dev_err(dev, "could not register acpi_dma_controller\n");
++}
++#else /* !CONFIG_ACPI */
++static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
++#endif /* !CONFIG_ACPI */
++
++/* --------------------- Cyclic DMA API extensions -------------------- */
++
++/**
++ * dw_dma_cyclic_start - start the cyclic DMA transfer
++ * @chan: the DMA channel to start
++ *
++ * Must be called with soft interrupts disabled. Returns zero on success or
++ * -errno on failure.
++ */
++int dw_dma_cyclic_start(struct dma_chan *chan)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
++      unsigned long           flags;
++
++      if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
++              dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
++              return -ENODEV;
++      }
++
++      spin_lock_irqsave(&dwc->lock, flags);
++
++      /* Assert channel is idle */
++      if (dma_readl(dw, CH_EN) & dwc->mask) {
++              dev_err(chan2dev(&dwc->chan),
++                      "BUG: Attempted to start non-idle channel\n");
++              dwc_dump_chan_regs(dwc);
++              spin_unlock_irqrestore(&dwc->lock, flags);
++              return -EBUSY;
++      }
++
++      dma_writel(dw, CLEAR.ERROR, dwc->mask);
++      dma_writel(dw, CLEAR.XFER, dwc->mask);
++
++      /* Setup DMAC channel registers */
++      channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
++      channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
++      channel_writel(dwc, CTL_HI, 0);
++
++      channel_set_bit(dw, CH_EN, dwc->mask);
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL(dw_dma_cyclic_start);
++
++/**
++ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
++ * @chan: the DMA channel to stop
++ *
++ * Must be called with soft interrupts disabled.
++ */
++void dw_dma_cyclic_stop(struct dma_chan *chan)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
++      unsigned long           flags;
++
++      spin_lock_irqsave(&dwc->lock, flags);
++
++      dwc_chan_disable(dw, dwc);
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++}
++EXPORT_SYMBOL(dw_dma_cyclic_stop);
++
++/**
++ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
++ * @chan: the DMA channel to prepare
++ * @buf_addr: physical DMA address where the buffer starts
++ * @buf_len: total number of bytes for the entire buffer
++ * @period_len: number of bytes for each period
++ * @direction: transfer direction, to or from device
++ *
++ * Must be called before trying to start the transfer. Returns a valid struct
++ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
++ */
++struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
++              dma_addr_t buf_addr, size_t buf_len, size_t period_len,
++              enum dma_transfer_direction direction)
++{
++      struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
++      struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
++      struct dw_cyclic_desc           *cdesc;
++      struct dw_cyclic_desc           *retval = NULL;
++      struct dw_desc                  *desc;
++      struct dw_desc                  *last = NULL;
++      unsigned long                   was_cyclic;
++      unsigned int                    reg_width;
++      unsigned int                    periods;
++      unsigned int                    i;
++      unsigned long                   flags;
++
++      spin_lock_irqsave(&dwc->lock, flags);
++      if (dwc->nollp) {
++              spin_unlock_irqrestore(&dwc->lock, flags);
++              dev_dbg(chan2dev(&dwc->chan),
++                              "channel doesn't support LLP transfers\n");
++              return ERR_PTR(-EINVAL);
++      }
++
++      if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
++              spin_unlock_irqrestore(&dwc->lock, flags);
++              dev_dbg(chan2dev(&dwc->chan),
++                              "queue and/or active list are not empty\n");
++              return ERR_PTR(-EBUSY);
++      }
++
++      was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
++      spin_unlock_irqrestore(&dwc->lock, flags);
++      if (was_cyclic) {
++              dev_dbg(chan2dev(&dwc->chan),
++                              "channel already prepared for cyclic DMA\n");
++              return ERR_PTR(-EBUSY);
++      }
++
++      retval = ERR_PTR(-EINVAL);
++
++      if (unlikely(!is_slave_direction(direction)))
++              goto out_err;
++
++      dwc->direction = direction;
++
++      if (direction == DMA_MEM_TO_DEV)
++              reg_width = __ffs(sconfig->dst_addr_width);
++      else
++              reg_width = __ffs(sconfig->src_addr_width);
++
++      periods = buf_len / period_len;
++
++      /* Check for too big/unaligned periods and unaligned DMA buffer. */
++      if (period_len > (dwc->block_size << reg_width))
++              goto out_err;
++      if (unlikely(period_len & ((1 << reg_width) - 1)))
++              goto out_err;
++      if (unlikely(buf_addr & ((1 << reg_width) - 1)))
++              goto out_err;
++
++      retval = ERR_PTR(-ENOMEM);
++
++      if (periods > NR_DESCS_PER_CHANNEL)
++              goto out_err;
++
++      cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
++      if (!cdesc)
++              goto out_err;
++
++      cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
++      if (!cdesc->desc)
++              goto out_err_alloc;
++
++      for (i = 0; i < periods; i++) {
++              desc = dwc_desc_get(dwc);
++              if (!desc)
++                      goto out_err_desc_get;
++
++              switch (direction) {
++              case DMA_MEM_TO_DEV:
++                      desc->lli.dar = sconfig->dst_addr;
++                      desc->lli.sar = buf_addr + (period_len * i);
++                      desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
++                                      | DWC_CTLL_DST_WIDTH(reg_width)
++                                      | DWC_CTLL_SRC_WIDTH(reg_width)
++                                      | DWC_CTLL_DST_FIX
++                                      | DWC_CTLL_SRC_INC
++                                      | DWC_CTLL_INT_EN);
++
++                      desc->lli.ctllo |= sconfig->device_fc ?
++                              DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
++                              DWC_CTLL_FC(DW_DMA_FC_D_M2P);
++
++                      break;
++              case DMA_DEV_TO_MEM:
++                      desc->lli.dar = buf_addr + (period_len * i);
++                      desc->lli.sar = sconfig->src_addr;
++                      desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
++                                      | DWC_CTLL_SRC_WIDTH(reg_width)
++                                      | DWC_CTLL_DST_WIDTH(reg_width)
++                                      | DWC_CTLL_DST_INC
++                                      | DWC_CTLL_SRC_FIX
++                                      | DWC_CTLL_INT_EN);
++
++                      desc->lli.ctllo |= sconfig->device_fc ?
++                              DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
++                              DWC_CTLL_FC(DW_DMA_FC_D_P2M);
++
++                      break;
++              default:
++                      break;
++              }
++
++              desc->lli.ctlhi = (period_len >> reg_width);
++              cdesc->desc[i] = desc;
++
++              if (last)
++                      last->lli.llp = desc->txd.phys;
++
++              last = desc;
++      }
++
++      /* Let's make a cyclic list */
++      last->lli.llp = cdesc->desc[0]->txd.phys;
++
++      dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
++                      "period %zu periods %d\n", (unsigned long long)buf_addr,
++                      buf_len, period_len, periods);
++
++      cdesc->periods = periods;
++      dwc->cdesc = cdesc;
++
++      return cdesc;
++
++out_err_desc_get:
++      while (i--)
++              dwc_desc_put(dwc, cdesc->desc[i]);
++out_err_alloc:
++      kfree(cdesc);
++out_err:
++      clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
++      return (struct dw_cyclic_desc *)retval;
++}
++EXPORT_SYMBOL(dw_dma_cyclic_prep);
++
++/**
++ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
++ * @chan: the DMA channel to free
++ */
++void dw_dma_cyclic_free(struct dma_chan *chan)
++{
++      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
++      struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
++      struct dw_cyclic_desc   *cdesc = dwc->cdesc;
++      int                     i;
++      unsigned long           flags;
++
++      dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
++
++      if (!cdesc)
++              return;
++
++      spin_lock_irqsave(&dwc->lock, flags);
++
++      dwc_chan_disable(dw, dwc);
++
++      dma_writel(dw, CLEAR.ERROR, dwc->mask);
++      dma_writel(dw, CLEAR.XFER, dwc->mask);
++
++      spin_unlock_irqrestore(&dwc->lock, flags);
++
++      for (i = 0; i < cdesc->periods; i++)
++              dwc_desc_put(dwc, cdesc->desc[i]);
++
++      kfree(cdesc->desc);
++      kfree(cdesc);
++
++      clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
++}
++EXPORT_SYMBOL(dw_dma_cyclic_free);
++
++/*----------------------------------------------------------------------*/
++
++static void dw_dma_off(struct dw_dma *dw)
++{
++      int i;
++
++      dma_writel(dw, CFG, 0);
++
++      channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++      channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
++      channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
++      channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
++
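++      /* Poll until the controller acknowledges the disable. */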
++      while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
++              cpu_relax();
++
++      for (i = 0; i < dw->dma.chancnt; i++)
++              dw->chan[i].initialized = false;
++}
++
++#ifdef CONFIG_OF
++static struct dw_dma_platform_data *
++dw_dma_parse_dt(struct platform_device *pdev)
++{
++      struct device_node *np = pdev->dev.of_node;
++      struct dw_dma_platform_data *pdata;
++      u32 tmp, arr[4];
++
++      if (!np) {
++              dev_err(&pdev->dev, "Missing DT data\n");
++              return NULL;
++      }
++
++      pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
++      if (!pdata)
++              return NULL;
++
++      if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
++              return NULL;
++
++      if (of_property_read_bool(np, "is_private"))
++              pdata->is_private = true;
++
++      if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
++              pdata->chan_allocation_order = (unsigned char)tmp;
++
++      if (!of_property_read_u32(np, "chan_priority", &tmp))
++              pdata->chan_priority = tmp;
++
++      if (!of_property_read_u32(np, "block_size", &tmp))
++              pdata->block_size = tmp;
++
++      if (!of_property_read_u32(np, "dma-masters", &tmp)) {
++              if (tmp > 4)
++                      return NULL;
++
++              pdata->nr_masters = tmp;
++      }
++
++      if (!of_property_read_u32_array(np, "data_width", arr,
++                              pdata->nr_masters))
++              for (tmp = 0; tmp < pdata->nr_masters; tmp++)
++                      pdata->data_width[tmp] = arr[tmp];
++
++      return pdata;
++}
++#else
++static inline struct dw_dma_platform_data *
++dw_dma_parse_dt(struct platform_device *pdev)
++{
++      return NULL;
++}
++#endif
++
++static int dw_probe(struct platform_device *pdev)
++{
++      struct dw_dma_platform_data *pdata;
++      struct resource         *io;
++      struct dw_dma           *dw;
++      size_t                  size;
++      void __iomem            *regs;
++      bool                    autocfg;
++      unsigned int            dw_params;
++      unsigned int            nr_channels;
++      unsigned int            max_blk_size = 0;
++      int                     irq;
++      int                     err;
++      int                     i;
++
++      irq = platform_get_irq(pdev, 0);
++      if (irq < 0)
++              return irq;
++
++      io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      regs = devm_ioremap_resource(&pdev->dev, io);
++      if (IS_ERR(regs))
++              return PTR_ERR(regs);
++
++      /* Apply default dma_mask if needed */
++      if (!pdev->dev.dma_mask) {
++              pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
++              pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
++      }
++
++      dw_params = dma_read_byaddr(regs, DW_PARAMS);
++      autocfg = dw_params >> DW_PARAMS_EN & 0x1;
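++      /* DW_PARAMS bit 28 flags the optional encoded-parameters block. */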
++
++      dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
++
++      pdata = dev_get_platdata(&pdev->dev);
++      if (!pdata)
++              pdata = dw_dma_parse_dt(pdev);
++
++      if (!pdata && autocfg) {
++              pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
++              if (!pdata)
++                      return -ENOMEM;
++
++              /* Fill platform data with the default values */
++              pdata->is_private = true;
++              pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
++              pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
++      } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
++              return -EINVAL;
++
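++      /* The 3-bit NR_CHAN field encodes 1..8 channels. */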
++      if (autocfg)
++              nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
++      else
++              nr_channels = pdata->nr_channels;
++
++      size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
++      dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
++      if (!dw)
++              return -ENOMEM;
++
++      dw->clk = devm_clk_get(&pdev->dev, "hclk");
++      if (IS_ERR(dw->clk))
++              return PTR_ERR(dw->clk);
++      clk_prepare_enable(dw->clk);
++
++      dw->regs = regs;
++
++      /* Get hardware configuration parameters */
++      if (autocfg) {
++              max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
++
++              dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
++              for (i = 0; i < dw->nr_masters; i++) {
++                      dw->data_width[i] =
++                              (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
++              }
++      } else {
++              dw->nr_masters = pdata->nr_masters;
++              memcpy(dw->data_width, pdata->data_width, 4);
++      }
++
++      /* Calculate all channel mask before DMA setup */
++      dw->all_chan_mask = (1 << nr_channels) - 1;
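++      /* E.g. 8 channels give an all_chan_mask of 0xff. */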
++
++      /* Force dma off, just in case */
++      dw_dma_off(dw);
++
++      /* Disable BLOCK interrupts as well */
++      channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
++
++      err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
++                             "dw_dmac", dw);
++      if (err)
++              return err;
++
++      platform_set_drvdata(pdev, dw);
++
++      /* Create a pool of consistent memory blocks for hardware descriptors */
++      dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
++                                       sizeof(struct dw_desc), 4, 0);
++      if (!dw->desc_pool) {
++              dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
++              return -ENOMEM;
++      }
++
++      tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
++
++      INIT_LIST_HEAD(&dw->dma.channels);
++      for (i = 0; i < nr_channels; i++) {
++              struct dw_dma_chan      *dwc = &dw->chan[i];
++              int                     r = nr_channels - i - 1;
++
++              dwc->chan.device = &dw->dma;
++              dma_cookie_init(&dwc->chan);
++              if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
++                      list_add_tail(&dwc->chan.device_node,
++                                      &dw->dma.channels);
++              else
++                      list_add(&dwc->chan.device_node, &dw->dma.channels);
++
++              /* 7 is highest priority & 0 is lowest. */
++              if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
++                      dwc->priority = r;
++              else
++                      dwc->priority = i;
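++              /* E.g. with 8 channels and ascending priority, channel 0 gets 7. */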
++
++              dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
++              spin_lock_init(&dwc->lock);
++              dwc->mask = 1 << i;
++
++              INIT_LIST_HEAD(&dwc->active_list);
++              INIT_LIST_HEAD(&dwc->queue);
++              INIT_LIST_HEAD(&dwc->free_list);
++
++              channel_clear_bit(dw, CH_EN, dwc->mask);
++
++              dwc->direction = DMA_TRANS_NONE;
++              dwc->request_line = ~0;
++
++              /* Hardware configuration */
++              if (autocfg) {
++                      unsigned int dwc_params;
++
++                      dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
++                                                   DWC_PARAMS);
++
++                      dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
++                                          dwc_params);
++
++                      /* Decode maximum block size for given channel. The
++                       * stored 4 bit value represents blocks from 0x00 for 3
++                       * up to 0x0a for 4095. */
++                      dwc->block_size =
++                              (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
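++                      /* I.e. (4 << 0x0) - 1 = 3 and (4 << 0xa) - 1 = 4095. */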
++                      dwc->nollp =
++                              (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
++              } else {
++                      dwc->block_size = pdata->block_size;
++
++                      /* Check if channel supports multi block transfer */
++                      channel_writel(dwc, LLP, 0xfffffffc);
++                      dwc->nollp =
++                              (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
++                      channel_writel(dwc, LLP, 0);
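++                      /* A zero read-back means no hardware LLP; multi
++                       * block transfers are then emulated in software. */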
++              }
++      }
++
++      /* Clear all interrupts on all channels. */
++      dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
++      dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
++      dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
++      dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
++      dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
++
++      dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
++      dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
++      if (pdata->is_private)
++              dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
++      dw->dma.dev = &pdev->dev;
++      dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
++      dw->dma.device_free_chan_resources = dwc_free_chan_resources;
++
++      dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
++
++      dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
++      dw->dma.device_control = dwc_control;
++
++      dw->dma.device_tx_status = dwc_tx_status;
++      dw->dma.device_issue_pending = dwc_issue_pending;
++
++      dma_writel(dw, CFG, DW_CFG_DMA_EN);
++
++      dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
++               nr_channels);
++
++      dma_async_device_register(&dw->dma);
++
++      if (pdev->dev.of_node) {
++              err = of_dma_controller_register(pdev->dev.of_node,
++                                               dw_dma_of_xlate, dw);
++              if (err)
++                      dev_err(&pdev->dev,
++                              "could not register of_dma_controller\n");
++      }
++
++      if (ACPI_HANDLE(&pdev->dev))
++              dw_dma_acpi_controller_register(dw);
++
++      return 0;
++}
++
++static int dw_remove(struct platform_device *pdev)
++{
++      struct dw_dma           *dw = platform_get_drvdata(pdev);
++      struct dw_dma_chan      *dwc, *_dwc;
++
++      if (pdev->dev.of_node)
++              of_dma_controller_free(pdev->dev.of_node);
++      dw_dma_off(dw);
++      dma_async_device_unregister(&dw->dma);
++
++      tasklet_kill(&dw->tasklet);
++
++      list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
++                      chan.device_node) {
++              list_del(&dwc->chan.device_node);
++              channel_clear_bit(dw, CH_EN, dwc->mask);
++      }
++
++      return 0;
++}
++
++static void dw_shutdown(struct platform_device *pdev)
++{
++      struct dw_dma   *dw = platform_get_drvdata(pdev);
++
++      dw_dma_off(dw);
++      clk_disable_unprepare(dw->clk);
++}
++
++static int dw_suspend_noirq(struct device *dev)
++{
++      struct platform_device *pdev = to_platform_device(dev);
++      struct dw_dma   *dw = platform_get_drvdata(pdev);
++
++      dw_dma_off(dw);
++      clk_disable_unprepare(dw->clk);
++
++      return 0;
++}
++
++static int dw_resume_noirq(struct device *dev)
++{
++      struct platform_device *pdev = to_platform_device(dev);
++      struct dw_dma   *dw = platform_get_drvdata(pdev);
++
++      clk_prepare_enable(dw->clk);
++      dma_writel(dw, CFG, DW_CFG_DMA_EN);
++
++      return 0;
++}
++
++static const struct dev_pm_ops dw_dev_pm_ops = {
++      .suspend_noirq = dw_suspend_noirq,
++      .resume_noirq = dw_resume_noirq,
++      .freeze_noirq = dw_suspend_noirq,
++      .thaw_noirq = dw_resume_noirq,
++      .restore_noirq = dw_resume_noirq,
++      .poweroff_noirq = dw_suspend_noirq,
++};
++
++#ifdef CONFIG_OF
++static const struct of_device_id dw_dma_of_id_table[] = {
++      { .compatible = "snps,dma-spear1340" },
++      {}
++};
++MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
++#endif
++
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id dw_dma_acpi_id_table[] = {
++      { "INTL9C60", 0 },
++      { }
++};
++#endif
++
++static struct platform_driver dw_driver = {
++      .probe          = dw_probe,
++      .remove         = dw_remove,
++      .shutdown       = dw_shutdown,
++      .driver = {
++              .name   = "dw_dmac",
++              .pm     = &dw_dev_pm_ops,
++              .of_match_table = of_match_ptr(dw_dma_of_id_table),
++              .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
++      },
++};
++
++static int __init dw_init(void)
++{
++      return platform_driver_register(&dw_driver);
++}
++subsys_initcall(dw_init);
++
++static void __exit dw_exit(void)
++{
++      platform_driver_unregister(&dw_driver);
++}
++module_exit(dw_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
++MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
++MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+--- /dev/null
++++ b/drivers/dma/dw/dw_dmac_regs.h
+@@ -0,0 +1,311 @@
++/*
++ * Driver for the Synopsys DesignWare AHB DMA Controller
++ *
++ * Copyright (C) 2005-2007 Atmel Corporation
++ * Copyright (C) 2010-2011 ST Microelectronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dw_dmac.h>
++
++#define DW_DMA_MAX_NR_CHANNELS        8
++#define DW_DMA_MAX_NR_REQUESTS        16
++
++/* flow controller */
++enum dw_dma_fc {
++      DW_DMA_FC_D_M2M,
++      DW_DMA_FC_D_M2P,
++      DW_DMA_FC_D_P2M,
++      DW_DMA_FC_D_P2P,
++      DW_DMA_FC_P_P2M,
++      DW_DMA_FC_SP_P2P,
++      DW_DMA_FC_P_M2P,
++      DW_DMA_FC_DP_P2P,
++};
++
++/*
++ * Redefine this macro to handle differences between 32- and 64-bit
++ * addressing, big vs. little endian, etc.
++ */
++#define DW_REG(name)          u32 name; u32 __pad_##name
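++/* Each DW_REG spans 8 bytes (value + pad), matching the 64-bit register stride. */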
++
++/* Hardware register definitions. */
++struct dw_dma_chan_regs {
++      DW_REG(SAR);            /* Source Address Register */
++      DW_REG(DAR);            /* Destination Address Register */
++      DW_REG(LLP);            /* Linked List Pointer */
++      u32     CTL_LO;         /* Control Register Low */
++      u32     CTL_HI;         /* Control Register High */
++      DW_REG(SSTAT);
++      DW_REG(DSTAT);
++      DW_REG(SSTATAR);
++      DW_REG(DSTATAR);
++      u32     CFG_LO;         /* Configuration Register Low */
++      u32     CFG_HI;         /* Configuration Register High */
++      DW_REG(SGR);
++      DW_REG(DSR);
++};
++
++struct dw_dma_irq_regs {
++      DW_REG(XFER);
++      DW_REG(BLOCK);
++      DW_REG(SRC_TRAN);
++      DW_REG(DST_TRAN);
++      DW_REG(ERROR);
++};
++
++struct dw_dma_regs {
++      /* per-channel registers */
++      struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
++
++      /* irq handling */
++      struct dw_dma_irq_regs  RAW;            /* r */
++      struct dw_dma_irq_regs  STATUS;         /* r (raw & mask) */
++      struct dw_dma_irq_regs  MASK;           /* rw (set = irq enabled) */
++      struct dw_dma_irq_regs  CLEAR;          /* w (ack, affects "raw") */
++
++      DW_REG(STATUS_INT);                     /* r */
++
++      /* software handshaking */
++      DW_REG(REQ_SRC);
++      DW_REG(REQ_DST);
++      DW_REG(SGL_REQ_SRC);
++      DW_REG(SGL_REQ_DST);
++      DW_REG(LAST_SRC);
++      DW_REG(LAST_DST);
++
++      /* miscellaneous */
++      DW_REG(CFG);
++      DW_REG(CH_EN);
++      DW_REG(ID);
++      DW_REG(TEST);
++
++      /* reserved */
++      DW_REG(__reserved0);
++      DW_REG(__reserved1);
++
++      /* optional encoded params, 0x3c8..0x3f7 */
++      u32     __reserved;
++
++      /* per-channel configuration registers */
++      u32     DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
++      u32     MULTI_BLK_TYPE;
++      u32     MAX_BLK_SIZE;
++
++      /* top-level parameters */
++      u32     DW_PARAMS;
++};
++
++#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
++#define dma_readl_native ioread32be
++#define dma_writel_native iowrite32be
++#else
++#define dma_readl_native readl
++#define dma_writel_native writel
++#endif
++
++/* To access the registers in the early stages of probe */
++#define dma_read_byaddr(addr, name) \
++      dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
++
++/* Bitfields in DW_PARAMS */
++#define DW_PARAMS_NR_CHAN     8               /* number of channels */
++#define DW_PARAMS_NR_MASTER   11              /* number of AHB masters */
++#define DW_PARAMS_DATA_WIDTH(n)       (15 + 2 * (n))
++#define DW_PARAMS_DATA_WIDTH1 15              /* master 1 data width */
++#define DW_PARAMS_DATA_WIDTH2 17              /* master 2 data width */
++#define DW_PARAMS_DATA_WIDTH3 19              /* master 3 data width */
++#define DW_PARAMS_DATA_WIDTH4 21              /* master 4 data width */
++#define DW_PARAMS_EN          28              /* encoded parameters */
++
++/* Bitfields in DWC_PARAMS */
++#define DWC_PARAMS_MBLK_EN    11              /* multi block transfer */
++
++/* Bitfields in CTL_LO */
++#define DWC_CTLL_INT_EN               (1 << 0)        /* irqs enabled? */
++#define DWC_CTLL_DST_WIDTH(n) ((n)<<1)        /* bytes per element */
++#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
++#define DWC_CTLL_DST_INC      (0<<7)          /* DAR update/not */
++#define DWC_CTLL_DST_DEC      (1<<7)
++#define DWC_CTLL_DST_FIX      (2<<7)
++#define DWC_CTLL_SRC_INC      (0<<7)          /* SAR update/not */
++#define DWC_CTLL_SRC_DEC      (1<<9)
++#define DWC_CTLL_SRC_FIX      (2<<9)
++#define DWC_CTLL_DST_MSIZE(n) ((n)<<11)       /* burst, #elements */
++#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
++#define DWC_CTLL_S_GATH_EN    (1 << 17)       /* src gather, !FIX */
++#define DWC_CTLL_D_SCAT_EN    (1 << 18)       /* dst scatter, !FIX */
++#define DWC_CTLL_FC(n)                ((n) << 20)
++#define DWC_CTLL_FC_M2M               (0 << 20)       /* mem-to-mem */
++#define DWC_CTLL_FC_M2P               (1 << 20)       /* mem-to-periph */
++#define DWC_CTLL_FC_P2M               (2 << 20)       /* periph-to-mem */
++#define DWC_CTLL_FC_P2P               (3 << 20)       /* periph-to-periph */
++/* plus 4 transfer types for peripheral-as-flow-controller */
++#define DWC_CTLL_DMS(n)               ((n)<<23)       /* dst master select */
++#define DWC_CTLL_SMS(n)               ((n)<<25)       /* src master select */
++#define DWC_CTLL_LLP_D_EN     (1 << 27)       /* dest block chain */
++#define DWC_CTLL_LLP_S_EN     (1 << 28)       /* src block chain */
++
++/* Bitfields in CTL_HI */
++#define DWC_CTLH_DONE         0x00001000
++#define DWC_CTLH_BLOCK_TS_MASK        0x00000fff
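++/* The 12-bit BLOCK_TS field limits one block to 4095 elements. */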
++
++/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
++#define DWC_CFGL_CH_PRIOR_MASK        (0x7 << 5)      /* priority mask */
++#define DWC_CFGL_CH_PRIOR(x)  ((x) << 5)      /* priority */
++#define DWC_CFGL_CH_SUSP      (1 << 8)        /* pause xfer */
++#define DWC_CFGL_FIFO_EMPTY   (1 << 9)        /* fifo is empty */
++#define DWC_CFGL_HS_DST               (1 << 10)       /* handshake w/dst */
++#define DWC_CFGL_HS_SRC               (1 << 11)       /* handshake w/src */
++#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
++#define DWC_CFGL_RELOAD_SAR   (1 << 30)
++#define DWC_CFGL_RELOAD_DAR   (1 << 31)
++
++/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
++#define DWC_CFGH_DS_UPD_EN    (1 << 5)
++#define DWC_CFGH_SS_UPD_EN    (1 << 6)
++
++/* Bitfields in SGR */
++#define DWC_SGR_SGI(x)                ((x) << 0)
++#define DWC_SGR_SGC(x)                ((x) << 20)
++
++/* Bitfields in DSR */
++#define DWC_DSR_DSI(x)                ((x) << 0)
++#define DWC_DSR_DSC(x)                ((x) << 20)
++
++/* Bitfields in CFG */
++#define DW_CFG_DMA_EN         (1 << 0)
++
++enum dw_dmac_flags {
++      DW_DMA_IS_CYCLIC = 0,
++      DW_DMA_IS_SOFT_LLP = 1,
++};
++
++struct dw_dma_chan {
++      struct dma_chan                 chan;
++      void __iomem                    *ch_regs;
++      u8                              mask;
++      u8                              priority;
++      enum dma_transfer_direction     direction;
++      bool                            paused;
++      bool                            initialized;
++
++      /* software emulation of the LLP transfers */
++      struct list_head        *tx_node_active;
++
++      spinlock_t              lock;
++
++      /* these other elements are all protected by lock */
++      unsigned long           flags;
++      struct list_head        active_list;
++      struct list_head        queue;
++      struct list_head        free_list;
++      u32                     residue;
++      struct dw_cyclic_desc   *cdesc;
++
++      unsigned int            descs_allocated;
++
++      /* hardware configuration */
++      unsigned int            block_size;
++      bool                    nollp;
++
++      /* custom slave configuration */
++      unsigned int            request_line;
++      unsigned char           src_master;
++      unsigned char           dst_master;
++
++      /* configuration passed via DMA_SLAVE_CONFIG */
++      struct dma_slave_config dma_sconfig;
++};
++
++static inline struct dw_dma_chan_regs __iomem *
++__dwc_regs(struct dw_dma_chan *dwc)
++{
++      return dwc->ch_regs;
++}
++
++#define channel_readl(dwc, name) \
++      dma_readl_native(&(__dwc_regs(dwc)->name))
++#define channel_writel(dwc, name, val) \
++      dma_writel_native((val), &(__dwc_regs(dwc)->name))
++
++static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
++{
++      return container_of(chan, struct dw_dma_chan, chan);
++}
++
++struct dw_dma {
++      struct dma_device       dma;
++      void __iomem            *regs;
++      struct dma_pool         *desc_pool;
++      struct tasklet_struct   tasklet;
++      struct clk              *clk;
++
++      u8                      all_chan_mask;
++
++      /* hardware configuration */
++      unsigned char           nr_masters;
++      unsigned char           data_width[4];
++
++      struct dw_dma_chan      chan[0];
++};
++
++static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
++{
++      return dw->regs;
++}
++
++#define dma_readl(dw, name) \
++      dma_readl_native(&(__dw_regs(dw)->name))
++#define dma_writel(dw, name, val) \
++      dma_writel_native((val), &(__dw_regs(dw)->name))
++
++#define channel_set_bit(dw, reg, mask) \
++      dma_writel(dw, reg, ((mask) << 8) | (mask))
++#define channel_clear_bit(dw, reg, mask) \
++      dma_writel(dw, reg, ((mask) << 8) | 0)
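++/* The upper byte is a write-enable mask, so set/clear avoid read-modify-write. */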
++
++static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
++{
++      return container_of(ddev, struct dw_dma, dma);
++}
++
++/* LLI == Linked List Item; a.k.a. DMA block descriptor */
++struct dw_lli {
++      /* values that are not changed by hardware */
++      u32             sar;
++      u32             dar;
++      u32             llp;            /* chain to next lli */
++      u32             ctllo;
++      /* values that may get written back: */
++      u32             ctlhi;
++      /* sstat and dstat can snapshot peripheral register state.
++       * silicon config may discard either or both...
++       */
++      u32             sstat;
++      u32             dstat;
++};
++
++struct dw_desc {
++      /* FIRST values the hardware uses */
++      struct dw_lli                   lli;
++
++      /* THEN values for driver housekeeping */
++      struct list_head                desc_node;
++      struct list_head                tx_list;
++      struct dma_async_tx_descriptor  txd;
++      size_t                          len;
++      size_t                          total_len;
++};
++
++#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node)
++
++static inline struct dw_desc *
++txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
++{
++      return container_of(txd, struct dw_desc, txd);
++}
+--- a/drivers/dma/dw_dmac.c
++++ /dev/null
+@@ -1,1969 +0,0 @@
+-/*
+- * Core driver for the Synopsys DesignWare DMA Controller
+- *
+- * Copyright (C) 2007-2008 Atmel Corporation
+- * Copyright (C) 2010-2011 ST Microelectronics
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/bitops.h>
+-#include <linux/clk.h>
+-#include <linux/delay.h>
+-#include <linux/dmaengine.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/dmapool.h>
+-#include <linux/err.h>
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+-#include <linux/io.h>
+-#include <linux/of.h>
+-#include <linux/of_dma.h>
+-#include <linux/mm.h>
+-#include <linux/module.h>
+-#include <linux/platform_device.h>
+-#include <linux/slab.h>
+-#include <linux/acpi.h>
+-#include <linux/acpi_dma.h>
+-
+-#include "dw_dmac_regs.h"
+-#include "dmaengine.h"
+-
+-/*
+- * This supports the Synopsys "DesignWare AHB Central DMA Controller",
+- * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
+- * of which use ARM any more).  See the "Databook" from Synopsys for
+- * information beyond what licensees probably provide.
+- *
+- * The driver has currently been tested only with the Atmel AT32AP7000,
+- * which does not support descriptor writeback.
+- */
+-
+-static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
+-{
+-      return slave ? slave->dst_master : 0;
+-}
+-
+-static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
+-{
+-      return slave ? slave->src_master : 1;
+-}
+-
+-static inline void dwc_set_masters(struct dw_dma_chan *dwc)
+-{
+-      struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+-      struct dw_dma_slave *dws = dwc->chan.private;
+-      unsigned char mmax = dw->nr_masters - 1;
+-
+-      if (dwc->request_line == ~0) {
+-              dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+-              dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
+-      }
+-}
+-
+-#define DWC_DEFAULT_CTLLO(_chan) ({                           \
+-              struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
+-              struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
+-              bool _is_slave = is_slave_direction(_dwc->direction);   \
+-              u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
+-                      DW_DMA_MSIZE_16;                        \
+-              u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
+-                      DW_DMA_MSIZE_16;                        \
+-                                                              \
+-              (DWC_CTLL_DST_MSIZE(_dmsize)                    \
+-               | DWC_CTLL_SRC_MSIZE(_smsize)                  \
+-               | DWC_CTLL_LLP_D_EN                            \
+-               | DWC_CTLL_LLP_S_EN                            \
+-               | DWC_CTLL_DMS(_dwc->dst_master)               \
+-               | DWC_CTLL_SMS(_dwc->src_master));             \
+-      })
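+-/* Slave bursts come from dma_slave_config; memcpy falls back to DW_DMA_MSIZE_16. */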
+-
+-/*
+- * Number of descriptors to allocate for each channel. This should be
+- * made configurable somehow; preferably, the clients (at least the
+- * ones using slave transfers) should be able to give us a hint.
+- */
+-#define NR_DESCS_PER_CHANNEL  64
+-
+-/*----------------------------------------------------------------------*/
+-
+-static struct device *chan2dev(struct dma_chan *chan)
+-{
+-      return &chan->dev->device;
+-}
+-static struct device *chan2parent(struct dma_chan *chan)
+-{
+-      return chan->dev->device.parent;
+-}
+-
+-static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
+-{
+-      return to_dw_desc(dwc->active_list.next);
+-}
+-
+-static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+-{
+-      struct dw_desc *desc, *_desc;
+-      struct dw_desc *ret = NULL;
+-      unsigned int i = 0;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-      list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
+-              i++;
+-              if (async_tx_test_ack(&desc->txd)) {
+-                      list_del(&desc->desc_node);
+-                      ret = desc;
+-                      break;
+-              }
+-              dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
+-      }
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
+-
+-      return ret;
+-}
+-
+-/*
+- * Move a descriptor, including any children, to the free list.
+- * `desc' must not be on any lists.
+- */
+-static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+-{
+-      unsigned long flags;
+-
+-      if (desc) {
+-              struct dw_desc *child;
+-
+-              spin_lock_irqsave(&dwc->lock, flags);
+-              list_for_each_entry(child, &desc->tx_list, desc_node)
+-                      dev_vdbg(chan2dev(&dwc->chan),
+-                                      "moving child desc %p to freelist\n",
+-                                      child);
+-              list_splice_init(&desc->tx_list, &dwc->free_list);
+-              dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
+-              list_add(&desc->desc_node, &dwc->free_list);
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-      }
+-}
+-
+-static void dwc_initialize(struct dw_dma_chan *dwc)
+-{
+-      struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+-      struct dw_dma_slave *dws = dwc->chan.private;
+-      u32 cfghi = DWC_CFGH_FIFO_MODE;
+-      u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+-
+-      if (dwc->initialized == true)
+-              return;
+-
+-      if (dws) {
+-              /*
+-               * We need controller-specific data to set up slave
+-               * transfers.
+-               */
+-              BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+-              cfghi = dws->cfg_hi;
+-              cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+-      } else {
+-              if (dwc->direction == DMA_MEM_TO_DEV)
+-                      cfghi = DWC_CFGH_DST_PER(dwc->request_line);
+-              else if (dwc->direction == DMA_DEV_TO_MEM)
+-                      cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+-      }
+-
+-      channel_writel(dwc, CFG_LO, cfglo);
+-      channel_writel(dwc, CFG_HI, cfghi);
+-
+-      /* Enable interrupts */
+-      channel_set_bit(dw, MASK.XFER, dwc->mask);
+-      channel_set_bit(dw, MASK.ERROR, dwc->mask);
+-
+-      dwc->initialized = true;
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-static inline unsigned int dwc_fast_fls(unsigned long long v)
+-{
+-      /*
+-       * We can be a lot more clever here, but this should take care
+-       * of the most common optimization.
+-       */
+-      if (!(v & 7))
+-              return 3;
+-      else if (!(v & 3))
+-              return 2;
+-      else if (!(v & 1))
+-              return 1;
+-      return 0;
+-}
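+-/* E.g. v = 0x1004: lowest set bit is bit 2, so 32-bit (width index 2). */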
+-
+-static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
+-{
+-      dev_err(chan2dev(&dwc->chan),
+-              "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+-              channel_readl(dwc, SAR),
+-              channel_readl(dwc, DAR),
+-              channel_readl(dwc, LLP),
+-              channel_readl(dwc, CTL_HI),
+-              channel_readl(dwc, CTL_LO));
+-}
+-
+-static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
+-{
+-      channel_clear_bit(dw, CH_EN, dwc->mask);
+-      while (dma_readl(dw, CH_EN) & dwc->mask)
+-              cpu_relax();
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-/* Perform single block transfer */
+-static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
+-                                     struct dw_desc *desc)
+-{
+-      struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
+-      u32             ctllo;
+-
+-      /* Software emulation of LLP mode relies on interrupts to continue
+-       * multi block transfer. */
+-      ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+-
+-      channel_writel(dwc, SAR, desc->lli.sar);
+-      channel_writel(dwc, DAR, desc->lli.dar);
+-      channel_writel(dwc, CTL_LO, ctllo);
+-      channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+-      channel_set_bit(dw, CH_EN, dwc->mask);
+-
+-      /* Move pointer to next descriptor */
+-      dwc->tx_node_active = dwc->tx_node_active->next;
+-}
+-
+-/* Called with dwc->lock held and bh disabled */
+-static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
+-{
+-      struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
+-      unsigned long   was_soft_llp;
+-
+-      /* ASSERT:  channel is idle */
+-      if (dma_readl(dw, CH_EN) & dwc->mask) {
+-              dev_err(chan2dev(&dwc->chan),
+-                      "BUG: Attempted to start non-idle channel\n");
+-              dwc_dump_chan_regs(dwc);
+-
+-              /* The tasklet will hopefully advance the queue... */
+-              return;
+-      }
+-
+-      if (dwc->nollp) {
+-              was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
+-                                              &dwc->flags);
+-              if (was_soft_llp) {
+-                      dev_err(chan2dev(&dwc->chan),
+-                              "BUG: Attempted to start new LLP transfer "
+-                              "inside ongoing one\n");
+-                      return;
+-              }
+-
+-              dwc_initialize(dwc);
+-
+-              dwc->residue = first->total_len;
+-              dwc->tx_node_active = &first->tx_list;
+-
+-              /* Submit first block */
+-              dwc_do_single_block(dwc, first);
+-
+-              return;
+-      }
+-
+-      dwc_initialize(dwc);
+-
+-      channel_writel(dwc, LLP, first->txd.phys);
+-      channel_writel(dwc, CTL_LO,
+-                      DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+-      channel_writel(dwc, CTL_HI, 0);
+-      channel_set_bit(dw, CH_EN, dwc->mask);
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-static void
+-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+-              bool callback_required)
+-{
+-      dma_async_tx_callback           callback = NULL;
+-      void                            *param = NULL;
+-      struct dma_async_tx_descriptor  *txd = &desc->txd;
+-      struct dw_desc                  *child;
+-      unsigned long                   flags;
+-
+-      dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-      dma_cookie_complete(txd);
+-      if (callback_required) {
+-              callback = txd->callback;
+-              param = txd->callback_param;
+-      }
+-
+-      /* async_tx_ack */
+-      list_for_each_entry(child, &desc->tx_list, desc_node)
+-              async_tx_ack(&child->txd);
+-      async_tx_ack(&desc->txd);
+-
+-      list_splice_init(&desc->tx_list, &dwc->free_list);
+-      list_move(&desc->desc_node, &dwc->free_list);
+-
+-      if (!is_slave_direction(dwc->direction)) {
+-              struct device *parent = chan2parent(&dwc->chan);
+-              if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+-                      if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+-                              dma_unmap_single(parent, desc->lli.dar,
+-                                      desc->total_len, DMA_FROM_DEVICE);
+-                      else
+-                              dma_unmap_page(parent, desc->lli.dar,
+-                                      desc->total_len, DMA_FROM_DEVICE);
+-              }
+-              if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+-                      if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+-                              dma_unmap_single(parent, desc->lli.sar,
+-                                      desc->total_len, DMA_TO_DEVICE);
+-                      else
+-                              dma_unmap_page(parent, desc->lli.sar,
+-                                      desc->total_len, DMA_TO_DEVICE);
+-              }
+-      }
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      if (callback)
+-              callback(param);
+-}
+-
+-static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
+-{
+-      struct dw_desc *desc, *_desc;
+-      LIST_HEAD(list);
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-      if (dma_readl(dw, CH_EN) & dwc->mask) {
+-              dev_err(chan2dev(&dwc->chan),
+-                      "BUG: XFER bit set, but channel not idle!\n");
+-
+-              /* Try to continue after resetting the channel... */
+-              dwc_chan_disable(dw, dwc);
+-      }
+-
+-      /*
+-       * Submit queued descriptors ASAP, i.e. before we go through
+-       * the completed ones.
+-       */
+-      list_splice_init(&dwc->active_list, &list);
+-      if (!list_empty(&dwc->queue)) {
+-              list_move(dwc->queue.next, &dwc->active_list);
+-              dwc_dostart(dwc, dwc_first_active(dwc));
+-      }
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      list_for_each_entry_safe(desc, _desc, &list, desc_node)
+-              dwc_descriptor_complete(dwc, desc, true);
+-}
+-
+-/* Returns how many bytes were already received from source */
+-static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+-{
+-      u32 ctlhi = channel_readl(dwc, CTL_HI);
+-      u32 ctllo = channel_readl(dwc, CTL_LO);
+-
+-      return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+-}
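+-/* BLOCK_TS counts elements; CTL_LO bits 6:4 hold log2 of the source width. */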
+-
+-static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
+-{
+-      dma_addr_t llp;
+-      struct dw_desc *desc, *_desc;
+-      struct dw_desc *child;
+-      u32 status_xfer;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-      llp = channel_readl(dwc, LLP);
+-      status_xfer = dma_readl(dw, RAW.XFER);
+-
+-      if (status_xfer & dwc->mask) {
+-              /* Everything we've submitted is done */
+-              dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+-              if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+-                      struct list_head *head, *active = dwc->tx_node_active;
+-
+-                      /*
+-                       * We are inside first active descriptor.
+-                       * Otherwise something is really wrong.
+-                       */
+-                      desc = dwc_first_active(dwc);
+-
+-                      head = &desc->tx_list;
+-                      if (active != head) {
+-                              /* Update desc to reflect last sent one */
+-                              if (active != head->next)
+-                                      desc = to_dw_desc(active->prev);
+-
+-                              dwc->residue -= desc->len;
+-
+-                              child = to_dw_desc(active);
+-
+-                              /* Submit next block */
+-                              dwc_do_single_block(dwc, child);
+-
+-                              spin_unlock_irqrestore(&dwc->lock, flags);
+-                              return;
+-                      }
+-
+-                      /* We are done here */
+-                      clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+-              }
+-
+-              dwc->residue = 0;
+-
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-              dwc_complete_all(dw, dwc);
+-              return;
+-      }
+-
+-      if (list_empty(&dwc->active_list)) {
+-              dwc->residue = 0;
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-              return;
+-      }
+-
+-      if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+-              dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-              return;
+-      }
+-
+-      dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
+-                      (unsigned long long)llp);
+-
+-      list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+-              /* Initial residue value */
+-              dwc->residue = desc->total_len;
+-
+-              /* Check the first descriptor's addr */
+-              if (desc->txd.phys == llp) {
+-                      spin_unlock_irqrestore(&dwc->lock, flags);
+-                      return;
+-              }
+-
+-              /* Check the first descriptor's llp */
+-              if (desc->lli.llp == llp) {
+-                      /* This one is currently in progress */
+-                      dwc->residue -= dwc_get_sent(dwc);
+-                      spin_unlock_irqrestore(&dwc->lock, flags);
+-                      return;
+-              }
+-
+-              dwc->residue -= desc->len;
+-              list_for_each_entry(child, &desc->tx_list, desc_node) {
+-                      if (child->lli.llp == llp) {
+-                              /* Currently in progress */
+-                              dwc->residue -= dwc_get_sent(dwc);
+-                              spin_unlock_irqrestore(&dwc->lock, flags);
+-                              return;
+-                      }
+-                      dwc->residue -= child->len;
+-              }
+-
+-              /*
+-               * No descriptors so far seem to be in progress, i.e.
+-               * this one must be done.
+-               */
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-              dwc_descriptor_complete(dwc, desc, true);
+-              spin_lock_irqsave(&dwc->lock, flags);
+-      }
+-
+-      dev_err(chan2dev(&dwc->chan),
+-              "BUG: All descriptors done, but channel not idle!\n");
+-
+-      /* Try to continue after resetting the channel... */
+-      dwc_chan_disable(dw, dwc);
+-
+-      if (!list_empty(&dwc->queue)) {
+-              list_move(dwc->queue.next, &dwc->active_list);
+-              dwc_dostart(dwc, dwc_first_active(dwc));
+-      }
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-}
+-
+-static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+-{
+-      dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+-               lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+-}
+-
+-static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
+-{
+-      struct dw_desc *bad_desc;
+-      struct dw_desc *child;
+-      unsigned long flags;
+-
+-      dwc_scan_descriptors(dw, dwc);
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-
+-      /*
+-       * The descriptor currently at the head of the active list is
+-       * borked. Since we don't have any way to report errors, we'll
+-       * just have to scream loudly and try to carry on.
+-       */
+-      bad_desc = dwc_first_active(dwc);
+-      list_del_init(&bad_desc->desc_node);
+-      list_move(dwc->queue.next, dwc->active_list.prev);
+-
+-      /* Clear the error flag and try to restart the controller */
+-      dma_writel(dw, CLEAR.ERROR, dwc->mask);
+-      if (!list_empty(&dwc->active_list))
+-              dwc_dostart(dwc, dwc_first_active(dwc));
+-
+-      /*
+-       * WARN may seem harsh, but since this only happens
+-       * when someone submits a bad physical address in a
+-       * descriptor, we should consider ourselves lucky that the
+-       * controller flagged an error instead of scribbling over
+-       * random memory locations.
+-       */
+-      dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+-                                     "  cookie: %d\n", bad_desc->txd.cookie);
+-      dwc_dump_lli(dwc, &bad_desc->lli);
+-      list_for_each_entry(child, &bad_desc->tx_list, desc_node)
+-              dwc_dump_lli(dwc, &child->lli);
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      /* Pretend the descriptor completed successfully */
+-      dwc_descriptor_complete(dwc, bad_desc, true);
+-}
+-
+-/* --------------------- Cyclic DMA API extensions -------------------- */
+-
+-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+-{
+-      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+-      return channel_readl(dwc, SAR);
+-}
+-EXPORT_SYMBOL(dw_dma_get_src_addr);
+-
+-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+-{
+-      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+-      return channel_readl(dwc, DAR);
+-}
+-EXPORT_SYMBOL(dw_dma_get_dst_addr);
+-
+-/* Called with dwc->lock held and all DMAC interrupts disabled */
+-static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+-              u32 status_err, u32 status_xfer)
+-{
+-      unsigned long flags;
+-
+-      if (dwc->mask) {
+-              void (*callback)(void *param);
+-              void *callback_param;
+-
+-              dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+-                              channel_readl(dwc, LLP));
+-
+-              callback = dwc->cdesc->period_callback;
+-              callback_param = dwc->cdesc->period_callback_param;
+-
+-              if (callback)
+-                      callback(callback_param);
+-      }
+-
+-      /*
+-       * Error and transfer complete are highly unlikely, and will most
+-       * likely be due to a configuration error by the user.
+-       */
+-      if (unlikely(status_err & dwc->mask) ||
+-                      unlikely(status_xfer & dwc->mask)) {
+-              int i;
+-
+-              dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+-                              "interrupt, stopping DMA transfer\n",
+-                              status_xfer ? "xfer" : "error");
+-
+-              spin_lock_irqsave(&dwc->lock, flags);
+-
+-              dwc_dump_chan_regs(dwc);
+-
+-              dwc_chan_disable(dw, dwc);
+-
+-              /* Make sure DMA does not restart by loading a new list */
+-              channel_writel(dwc, LLP, 0);
+-              channel_writel(dwc, CTL_LO, 0);
+-              channel_writel(dwc, CTL_HI, 0);
+-
+-              dma_writel(dw, CLEAR.ERROR, dwc->mask);
+-              dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+-              for (i = 0; i < dwc->cdesc->periods; i++)
+-                      dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+-
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-      }
+-}
+-
+-/* ------------------------------------------------------------------------- */
+-
+-static void dw_dma_tasklet(unsigned long data)
+-{
+-      struct dw_dma *dw = (struct dw_dma *)data;
+-      struct dw_dma_chan *dwc;
+-      u32 status_xfer;
+-      u32 status_err;
+-      int i;
+-
+-      status_xfer = dma_readl(dw, RAW.XFER);
+-      status_err = dma_readl(dw, RAW.ERROR);
+-
+-      dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
+-
+-      for (i = 0; i < dw->dma.chancnt; i++) {
+-              dwc = &dw->chan[i];
+-              if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+-                      dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+-              else if (status_err & (1 << i))
+-                      dwc_handle_error(dw, dwc);
+-              else if (status_xfer & (1 << i))
+-                      dwc_scan_descriptors(dw, dwc);
+-      }
+-
+-      /*
+-       * Re-enable interrupts.
+-       */
+-      channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+-      channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+-}
+-
+-static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+-{
+-      struct dw_dma *dw = dev_id;
+-      u32 status;
+-
+-      dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
+-                      dma_readl(dw, STATUS_INT));
+-
+-      /*
+-       * Just disable the interrupts. We'll turn them back on in the
+-       * softirq handler.
+-       */
+-      channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+-      channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+-
+-      status = dma_readl(dw, STATUS_INT);
+-      if (status) {
+-              dev_err(dw->dma.dev,
+-                      "BUG: Unexpected interrupts pending: 0x%x\n",
+-                      status);
+-
+-              /* Try to recover */
+-              channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+-              channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+-              channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+-              channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+-      }
+-
+-      tasklet_schedule(&dw->tasklet);
+-
+-      return IRQ_HANDLED;
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
+-{
+-      struct dw_desc          *desc = txd_to_dw_desc(tx);
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
+-      dma_cookie_t            cookie;
+-      unsigned long           flags;
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-      cookie = dma_cookie_assign(tx);
+-
+-      /*
+-       * REVISIT: We should attempt to chain as many descriptors as
+-       * possible, perhaps even appending to those already submitted
+-       * for DMA. But this is hard to do in a race-free manner.
+-       */
+-      if (list_empty(&dwc->active_list)) {
+-              dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
+-                              desc->txd.cookie);
+-              list_add_tail(&desc->desc_node, &dwc->active_list);
+-              dwc_dostart(dwc, dwc_first_active(dwc));
+-      } else {
+-              dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
+-                              desc->txd.cookie);
+-
+-              list_add_tail(&desc->desc_node, &dwc->queue);
+-      }
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      return cookie;
+-}
+-
+-static struct dma_async_tx_descriptor *
+-dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+-              size_t len, unsigned long flags)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma           *dw = to_dw_dma(chan->device);
+-      struct dw_desc          *desc;
+-      struct dw_desc          *first;
+-      struct dw_desc          *prev;
+-      size_t                  xfer_count;
+-      size_t                  offset;
+-      unsigned int            src_width;
+-      unsigned int            dst_width;
+-      unsigned int            data_width;
+-      u32                     ctllo;
+-
+-      dev_vdbg(chan2dev(chan),
+-                      "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
+-                      (unsigned long long)dest, (unsigned long long)src,
+-                      len, flags);
+-
+-      if (unlikely(!len)) {
+-              dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+-              return NULL;
+-      }
+-
+-      dwc->direction = DMA_MEM_TO_MEM;
+-
+-      data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
+-                         dw->data_width[dwc->dst_master]);
+-
+-      src_width = dst_width = min_t(unsigned int, data_width,
+-                                    dwc_fast_fls(src | dest | len));
+-
+-      ctllo = DWC_DEFAULT_CTLLO(chan)
+-                      | DWC_CTLL_DST_WIDTH(dst_width)
+-                      | DWC_CTLL_SRC_WIDTH(src_width)
+-                      | DWC_CTLL_DST_INC
+-                      | DWC_CTLL_SRC_INC
+-                      | DWC_CTLL_FC_M2M;
+-      prev = first = NULL;
+-
+-      for (offset = 0; offset < len; offset += xfer_count << src_width) {
+-              xfer_count = min_t(size_t, (len - offset) >> src_width,
+-                                         dwc->block_size);
+-
+-              desc = dwc_desc_get(dwc);
+-              if (!desc)
+-                      goto err_desc_get;
+-
+-              desc->lli.sar = src + offset;
+-              desc->lli.dar = dest + offset;
+-              desc->lli.ctllo = ctllo;
+-              desc->lli.ctlhi = xfer_count;
+-              desc->len = xfer_count << src_width;
+-
+-              if (!first) {
+-                      first = desc;
+-              } else {
+-                      prev->lli.llp = desc->txd.phys;
+-                      list_add_tail(&desc->desc_node,
+-                                      &first->tx_list);
+-              }
+-              prev = desc;
+-      }
+-
+-      if (flags & DMA_PREP_INTERRUPT)
+-              /* Trigger interrupt after last block */
+-              prev->lli.ctllo |= DWC_CTLL_INT_EN;
+-
+-      prev->lli.llp = 0;
+-      first->txd.flags = flags;
+-      first->total_len = len;
+-
+-      return &first->txd;
+-
+-err_desc_get:
+-      dwc_desc_put(dwc, first);
+-      return NULL;
+-}
+-
+-static struct dma_async_tx_descriptor *
+-dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+-              unsigned int sg_len, enum dma_transfer_direction direction,
+-              unsigned long flags, void *context)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma           *dw = to_dw_dma(chan->device);
+-      struct dma_slave_config *sconfig = &dwc->dma_sconfig;
+-      struct dw_desc          *prev;
+-      struct dw_desc          *first;
+-      u32                     ctllo;
+-      dma_addr_t              reg;
+-      unsigned int            reg_width;
+-      unsigned int            mem_width;
+-      unsigned int            data_width;
+-      unsigned int            i;
+-      struct scatterlist      *sg;
+-      size_t                  total_len = 0;
+-
+-      dev_vdbg(chan2dev(chan), "%s\n", __func__);
+-
+-      if (unlikely(!is_slave_direction(direction) || !sg_len))
+-              return NULL;
+-
+-      dwc->direction = direction;
+-
+-      prev = first = NULL;
+-
+-      switch (direction) {
+-      case DMA_MEM_TO_DEV:
+-              reg_width = __fls(sconfig->dst_addr_width);
+-              reg = sconfig->dst_addr;
+-              ctllo = (DWC_DEFAULT_CTLLO(chan)
+-                              | DWC_CTLL_DST_WIDTH(reg_width)
+-                              | DWC_CTLL_DST_FIX
+-                              | DWC_CTLL_SRC_INC);
+-
+-              ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+-                      DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+-
+-              data_width = dw->data_width[dwc->src_master];
+-
+-              for_each_sg(sgl, sg, sg_len, i) {
+-                      struct dw_desc  *desc;
+-                      u32             len, dlen, mem;
+-
+-                      mem = sg_dma_address(sg);
+-                      len = sg_dma_len(sg);
+-
+-                      mem_width = min_t(unsigned int,
+-                                        data_width, dwc_fast_fls(mem | len));
+-
+-slave_sg_todev_fill_desc:
+-                      desc = dwc_desc_get(dwc);
+-                      if (!desc) {
+-                              dev_err(chan2dev(chan),
+-                                      "not enough descriptors available\n");
+-                              goto err_desc_get;
+-                      }
+-
+-                      desc->lli.sar = mem;
+-                      desc->lli.dar = reg;
+-                      desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+-                      if ((len >> mem_width) > dwc->block_size) {
+-                              dlen = dwc->block_size << mem_width;
+-                              mem += dlen;
+-                              len -= dlen;
+-                      } else {
+-                              dlen = len;
+-                              len = 0;
+-                      }
+-
+-                      desc->lli.ctlhi = dlen >> mem_width;
+-                      desc->len = dlen;
+-
+-                      if (!first) {
+-                              first = desc;
+-                      } else {
+-                              prev->lli.llp = desc->txd.phys;
+-                              list_add_tail(&desc->desc_node,
+-                                              &first->tx_list);
+-                      }
+-                      prev = desc;
+-                      total_len += dlen;
+-
+-                      if (len)
+-                              goto slave_sg_todev_fill_desc;
+-              }
+-              break;
+-      case DMA_DEV_TO_MEM:
+-              reg_width = __fls(sconfig->src_addr_width);
+-              reg = sconfig->src_addr;
+-              ctllo = (DWC_DEFAULT_CTLLO(chan)
+-                              | DWC_CTLL_SRC_WIDTH(reg_width)
+-                              | DWC_CTLL_DST_INC
+-                              | DWC_CTLL_SRC_FIX);
+-
+-              ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+-                      DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+-
+-              data_width = dw->data_width[dwc->dst_master];
+-
+-              for_each_sg(sgl, sg, sg_len, i) {
+-                      struct dw_desc  *desc;
+-                      u32             len, dlen, mem;
+-
+-                      mem = sg_dma_address(sg);
+-                      len = sg_dma_len(sg);
+-
+-                      mem_width = min_t(unsigned int,
+-                                        data_width, dwc_fast_fls(mem | len));
+-
+-slave_sg_fromdev_fill_desc:
+-                      desc = dwc_desc_get(dwc);
+-                      if (!desc) {
+-                              dev_err(chan2dev(chan),
+-                                              "not enough descriptors available\n");
+-                              goto err_desc_get;
+-                      }
+-
+-                      desc->lli.sar = reg;
+-                      desc->lli.dar = mem;
+-                      desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+-                      if ((len >> reg_width) > dwc->block_size) {
+-                              dlen = dwc->block_size << reg_width;
+-                              mem += dlen;
+-                              len -= dlen;
+-                      } else {
+-                              dlen = len;
+-                              len = 0;
+-                      }
+-                      desc->lli.ctlhi = dlen >> reg_width;
+-                      desc->len = dlen;
+-
+-                      if (!first) {
+-                              first = desc;
+-                      } else {
+-                              prev->lli.llp = desc->txd.phys;
+-                              list_add_tail(&desc->desc_node,
+-                                              &first->tx_list);
+-                      }
+-                      prev = desc;
+-                      total_len += dlen;
+-
+-                      if (len)
+-                              goto slave_sg_fromdev_fill_desc;
+-              }
+-              break;
+-      default:
+-              return NULL;
+-      }
+-
+-      if (flags & DMA_PREP_INTERRUPT)
+-              /* Trigger interrupt after last block */
+-              prev->lli.ctllo |= DWC_CTLL_INT_EN;
+-
+-      prev->lli.llp = 0;
+-      first->total_len = total_len;
+-
+-      return &first->txd;
+-
+-err_desc_get:
+-      dwc_desc_put(dwc, first);
+-      return NULL;
+-}
+-
+-/*
+- * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
+- * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+- *
+- * NOTE: burst size 2 is not supported by the controller.
+- *
+- * This can be done by finding the most significant bit set: fls(n) - 2.
+- */
+-static inline void convert_burst(u32 *maxburst)
+-{
+-      if (*maxburst > 1)
+-              *maxburst = fls(*maxburst) - 2;
+-      else
+-              *maxburst = 0;
+-}
+-
+-static int
+-set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+-{
+-      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+-
+-      /* Check if chan will be configured for slave transfers */
+-      if (!is_slave_direction(sconfig->direction))
+-              return -EINVAL;
+-
+-      memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+-      dwc->direction = sconfig->direction;
+-
+-      /* Take the request line from slave_id member */
+-      if (dwc->request_line == ~0)
+-              dwc->request_line = sconfig->slave_id;
+-
+-      convert_burst(&dwc->dma_sconfig.src_maxburst);
+-      convert_burst(&dwc->dma_sconfig.dst_maxburst);
+-
+-      return 0;
+-}
+-
+-static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+-{
+-      u32 cfglo = channel_readl(dwc, CFG_LO);
+-      unsigned int count = 20;        /* timeout iterations */
+-
+-      channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+-      while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
+-              udelay(2);
+-
+-      dwc->paused = true;
+-}
+-
+-static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+-{
+-      u32 cfglo = channel_readl(dwc, CFG_LO);
+-
+-      channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+-
+-      dwc->paused = false;
+-}
+-
+-static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+-                     unsigned long arg)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma           *dw = to_dw_dma(chan->device);
+-      struct dw_desc          *desc, *_desc;
+-      unsigned long           flags;
+-      LIST_HEAD(list);
+-
+-      if (cmd == DMA_PAUSE) {
+-              spin_lock_irqsave(&dwc->lock, flags);
+-
+-              dwc_chan_pause(dwc);
+-
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-      } else if (cmd == DMA_RESUME) {
+-              if (!dwc->paused)
+-                      return 0;
+-
+-              spin_lock_irqsave(&dwc->lock, flags);
+-
+-              dwc_chan_resume(dwc);
+-
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-      } else if (cmd == DMA_TERMINATE_ALL) {
+-              spin_lock_irqsave(&dwc->lock, flags);
+-
+-              clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+-
+-              dwc_chan_disable(dw, dwc);
+-
+-              dwc_chan_resume(dwc);
+-
+-              /* active_list entries will end up before queued entries */
+-              list_splice_init(&dwc->queue, &list);
+-              list_splice_init(&dwc->active_list, &list);
+-
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-              /* Flush all pending and queued descriptors */
+-              list_for_each_entry_safe(desc, _desc, &list, desc_node)
+-                      dwc_descriptor_complete(dwc, desc, false);
+-      } else if (cmd == DMA_SLAVE_CONFIG) {
+-              return set_runtime_config(chan, (struct dma_slave_config *)arg);
+-      } else {
+-              return -ENXIO;
+-      }
+-
+-      return 0;
+-}
+-
+-static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+-{
+-      unsigned long flags;
+-      u32 residue;
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-
+-      residue = dwc->residue;
+-      if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+-              residue -= dwc_get_sent(dwc);
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-      return residue;
+-}
+-
+-static enum dma_status
+-dwc_tx_status(struct dma_chan *chan,
+-            dma_cookie_t cookie,
+-            struct dma_tx_state *txstate)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      enum dma_status         ret;
+-
+-      ret = dma_cookie_status(chan, cookie, txstate);
+-      if (ret != DMA_SUCCESS) {
+-              dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+-
+-              ret = dma_cookie_status(chan, cookie, txstate);
+-      }
+-
+-      if (ret != DMA_SUCCESS)
+-              dma_set_residue(txstate, dwc_get_residue(dwc));
+-
+-      if (dwc->paused)
+-              return DMA_PAUSED;
+-
+-      return ret;
+-}
+-
+-static void dwc_issue_pending(struct dma_chan *chan)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-
+-      if (!list_empty(&dwc->queue))
+-              dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+-}
+-
+-static int dwc_alloc_chan_resources(struct dma_chan *chan)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma           *dw = to_dw_dma(chan->device);
+-      struct dw_desc          *desc;
+-      int                     i;
+-      unsigned long           flags;
+-
+-      dev_vdbg(chan2dev(chan), "%s\n", __func__);
+-
+-      /* ASSERT:  channel is idle */
+-      if (dma_readl(dw, CH_EN) & dwc->mask) {
+-              dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
+-              return -EIO;
+-      }
+-
+-      dma_cookie_init(chan);
+-
+-      /*
+-       * NOTE: some controllers may have additional features that we
+-       * need to initialize here, like "scatter-gather" (which
+-       * doesn't mean what you think it means), and status writeback.
+-       */
+-
+-      dwc_set_masters(dwc);
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-      i = dwc->descs_allocated;
+-      while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+-              dma_addr_t phys;
+-
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-              desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
+-              if (!desc)
+-                      goto err_desc_alloc;
+-
+-              memset(desc, 0, sizeof(struct dw_desc));
+-
+-              INIT_LIST_HEAD(&desc->tx_list);
+-              dma_async_tx_descriptor_init(&desc->txd, chan);
+-              desc->txd.tx_submit = dwc_tx_submit;
+-              desc->txd.flags = DMA_CTRL_ACK;
+-              desc->txd.phys = phys;
+-
+-              dwc_desc_put(dwc, desc);
+-
+-              spin_lock_irqsave(&dwc->lock, flags);
+-              i = ++dwc->descs_allocated;
+-      }
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+-
+-      return i;
+-
+-err_desc_alloc:
+-      dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+-
+-      return i;
+-}
+-
+-static void dwc_free_chan_resources(struct dma_chan *chan)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma           *dw = to_dw_dma(chan->device);
+-      struct dw_desc          *desc, *_desc;
+-      unsigned long           flags;
+-      LIST_HEAD(list);
+-
+-      dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
+-                      dwc->descs_allocated);
+-
+-      /* ASSERT:  channel is idle */
+-      BUG_ON(!list_empty(&dwc->active_list));
+-      BUG_ON(!list_empty(&dwc->queue));
+-      BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-      list_splice_init(&dwc->free_list, &list);
+-      dwc->descs_allocated = 0;
+-      dwc->initialized = false;
+-      dwc->request_line = ~0;
+-
+-      /* Disable interrupts */
+-      channel_clear_bit(dw, MASK.XFER, dwc->mask);
+-      channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+-              dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
+-              dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
+-      }
+-
+-      dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
+-}
+-
+-/*----------------------------------------------------------------------*/
+-
+-struct dw_dma_of_filter_args {
+-      struct dw_dma *dw;
+-      unsigned int req;
+-      unsigned int src;
+-      unsigned int dst;
+-};
+-
+-static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
+-{
+-      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma_of_filter_args *fargs = param;
+-
+-      /* Ensure the device matches our channel */
+-      if (chan->device != &fargs->dw->dma)
+-              return false;
+-
+-      dwc->request_line = fargs->req;
+-      dwc->src_master = fargs->src;
+-      dwc->dst_master = fargs->dst;
+-
+-      return true;
+-}
+-
+-static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+-                                      struct of_dma *ofdma)
+-{
+-      struct dw_dma *dw = ofdma->of_dma_data;
+-      struct dw_dma_of_filter_args fargs = {
+-              .dw = dw,
+-      };
+-      dma_cap_mask_t cap;
+-
+-      if (dma_spec->args_count != 3)
+-              return NULL;
+-
+-      fargs.req = dma_spec->args[0];
+-      fargs.src = dma_spec->args[1];
+-      fargs.dst = dma_spec->args[2];
+-
+-      if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+-                  fargs.src >= dw->nr_masters ||
+-                  fargs.dst >= dw->nr_masters))
+-              return NULL;
+-
+-      dma_cap_zero(cap);
+-      dma_cap_set(DMA_SLAVE, cap);
+-
+-      /* TODO: there should be a simpler way to do this */
+-      return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+-}
+-
+-#ifdef CONFIG_ACPI
+-static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+-{
+-      struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+-      struct acpi_dma_spec *dma_spec = param;
+-
+-      if (chan->device->dev != dma_spec->dev ||
+-          chan->chan_id != dma_spec->chan_id)
+-              return false;
+-
+-      dwc->request_line = dma_spec->slave_id;
+-      dwc->src_master = dwc_get_sms(NULL);
+-      dwc->dst_master = dwc_get_dms(NULL);
+-
+-      return true;
+-}
+-
+-static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+-{
+-      struct device *dev = dw->dma.dev;
+-      struct acpi_dma_filter_info *info;
+-      int ret;
+-
+-      info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+-      if (!info)
+-              return;
+-
+-      dma_cap_zero(info->dma_cap);
+-      dma_cap_set(DMA_SLAVE, info->dma_cap);
+-      info->filter_fn = dw_dma_acpi_filter;
+-
+-      ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+-                                              info);
+-      if (ret)
+-              dev_err(dev, "could not register acpi_dma_controller\n");
+-}
+-#else /* !CONFIG_ACPI */
+-static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+-#endif /* !CONFIG_ACPI */
+-
+-/* --------------------- Cyclic DMA API extensions -------------------- */
+-
+-/**
+- * dw_dma_cyclic_start - start the cyclic DMA transfer
+- * @chan: the DMA channel to start
+- *
+- * Must be called with soft interrupts disabled. Returns zero on success or
+- * -errno on failure.
+- */
+-int dw_dma_cyclic_start(struct dma_chan *chan)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+-      unsigned long           flags;
+-
+-      if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+-              dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+-              return -ENODEV;
+-      }
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-
+-      /* Assert channel is idle */
+-      if (dma_readl(dw, CH_EN) & dwc->mask) {
+-              dev_err(chan2dev(&dwc->chan),
+-                      "BUG: Attempted to start non-idle channel\n");
+-              dwc_dump_chan_regs(dwc);
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-              return -EBUSY;
+-      }
+-
+-      dma_writel(dw, CLEAR.ERROR, dwc->mask);
+-      dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+-      /* Setup DMAC channel registers */
+-      channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+-      channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+-      channel_writel(dwc, CTL_HI, 0);
+-
+-      channel_set_bit(dw, CH_EN, dwc->mask);
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL(dw_dma_cyclic_start);
+-
+-/**
+- * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+- * @chan: the DMA channel to stop
+- *
+- * Must be called with soft interrupts disabled.
+- */
+-void dw_dma_cyclic_stop(struct dma_chan *chan)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+-      unsigned long           flags;
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-
+-      dwc_chan_disable(dw, dwc);
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-}
+-EXPORT_SYMBOL(dw_dma_cyclic_stop);
+-
+-/**
+- * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+- * @chan: the DMA channel to prepare
+- * @buf_addr: physical DMA address where the buffer starts
+- * @buf_len: total number of bytes for the entire buffer
+- * @period_len: number of bytes for each period
+- * @direction: transfer direction, to or from device
+- *
+- * Must be called before trying to start the transfer. Returns a valid struct
+- * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+- */
+-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+-              dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+-              enum dma_transfer_direction direction)
+-{
+-      struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
+-      struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
+-      struct dw_cyclic_desc           *cdesc;
+-      struct dw_cyclic_desc           *retval = NULL;
+-      struct dw_desc                  *desc;
+-      struct dw_desc                  *last = NULL;
+-      unsigned long                   was_cyclic;
+-      unsigned int                    reg_width;
+-      unsigned int                    periods;
+-      unsigned int                    i;
+-      unsigned long                   flags;
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-      if (dwc->nollp) {
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-              dev_dbg(chan2dev(&dwc->chan),
+-                              "channel doesn't support LLP transfers\n");
+-              return ERR_PTR(-EINVAL);
+-      }
+-
+-      if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+-              spin_unlock_irqrestore(&dwc->lock, flags);
+-              dev_dbg(chan2dev(&dwc->chan),
+-                              "queue and/or active list are not empty\n");
+-              return ERR_PTR(-EBUSY);
+-      }
+-
+-      was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-      if (was_cyclic) {
+-              dev_dbg(chan2dev(&dwc->chan),
+-                              "channel already prepared for cyclic DMA\n");
+-              return ERR_PTR(-EBUSY);
+-      }
+-
+-      retval = ERR_PTR(-EINVAL);
+-
+-      if (unlikely(!is_slave_direction(direction)))
+-              goto out_err;
+-
+-      dwc->direction = direction;
+-
+-      if (direction == DMA_MEM_TO_DEV)
+-              reg_width = __ffs(sconfig->dst_addr_width);
+-      else
+-              reg_width = __ffs(sconfig->src_addr_width);
+-
+-      periods = buf_len / period_len;
+-
+-      /* Check for too big/unaligned periods and unaligned DMA buffer. */
+-      if (period_len > (dwc->block_size << reg_width))
+-              goto out_err;
+-      if (unlikely(period_len & ((1 << reg_width) - 1)))
+-              goto out_err;
+-      if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+-              goto out_err;
+-
+-      retval = ERR_PTR(-ENOMEM);
+-
+-      if (periods > NR_DESCS_PER_CHANNEL)
+-              goto out_err;
+-
+-      cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+-      if (!cdesc)
+-              goto out_err;
+-
+-      cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+-      if (!cdesc->desc)
+-              goto out_err_alloc;
+-
+-      for (i = 0; i < periods; i++) {
+-              desc = dwc_desc_get(dwc);
+-              if (!desc)
+-                      goto out_err_desc_get;
+-
+-              switch (direction) {
+-              case DMA_MEM_TO_DEV:
+-                      desc->lli.dar = sconfig->dst_addr;
+-                      desc->lli.sar = buf_addr + (period_len * i);
+-                      desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+-                                      | DWC_CTLL_DST_WIDTH(reg_width)
+-                                      | DWC_CTLL_SRC_WIDTH(reg_width)
+-                                      | DWC_CTLL_DST_FIX
+-                                      | DWC_CTLL_SRC_INC
+-                                      | DWC_CTLL_INT_EN);
+-
+-                      desc->lli.ctllo |= sconfig->device_fc ?
+-                              DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+-                              DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+-
+-                      break;
+-              case DMA_DEV_TO_MEM:
+-                      desc->lli.dar = buf_addr + (period_len * i);
+-                      desc->lli.sar = sconfig->src_addr;
+-                      desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+-                                      | DWC_CTLL_SRC_WIDTH(reg_width)
+-                                      | DWC_CTLL_DST_WIDTH(reg_width)
+-                                      | DWC_CTLL_DST_INC
+-                                      | DWC_CTLL_SRC_FIX
+-                                      | DWC_CTLL_INT_EN);
+-
+-                      desc->lli.ctllo |= sconfig->device_fc ?
+-                              DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+-                              DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+-
+-                      break;
+-              default:
+-                      break;
+-              }
+-
+-              desc->lli.ctlhi = (period_len >> reg_width);
+-              cdesc->desc[i] = desc;
+-
+-              if (last)
+-                      last->lli.llp = desc->txd.phys;
+-
+-              last = desc;
+-      }
+-
+-      /* Let's make a cyclic list */
+-      last->lli.llp = cdesc->desc[0]->txd.phys;
+-
+-      dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
+-                      "period %zu periods %d\n", (unsigned long long)buf_addr,
+-                      buf_len, period_len, periods);
+-
+-      cdesc->periods = periods;
+-      dwc->cdesc = cdesc;
+-
+-      return cdesc;
+-
+-out_err_desc_get:
+-      while (i--)
+-              dwc_desc_put(dwc, cdesc->desc[i]);
+-out_err_alloc:
+-      kfree(cdesc);
+-out_err:
+-      clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+-      return (struct dw_cyclic_desc *)retval;
+-}
+-EXPORT_SYMBOL(dw_dma_cyclic_prep);
+-
+-/**
+- * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+- * @chan: the DMA channel to free
+- */
+-void dw_dma_cyclic_free(struct dma_chan *chan)
+-{
+-      struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+-      struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+-      struct dw_cyclic_desc   *cdesc = dwc->cdesc;
+-      int                     i;
+-      unsigned long           flags;
+-
+-      dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
+-
+-      if (!cdesc)
+-              return;
+-
+-      spin_lock_irqsave(&dwc->lock, flags);
+-
+-      dwc_chan_disable(dw, dwc);
+-
+-      dma_writel(dw, CLEAR.ERROR, dwc->mask);
+-      dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+-      spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-      for (i = 0; i < cdesc->periods; i++)
+-              dwc_desc_put(dwc, cdesc->desc[i]);
+-
+-      kfree(cdesc->desc);
+-      kfree(cdesc);
+-
+-      clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+-}
+-EXPORT_SYMBOL(dw_dma_cyclic_free);
+-
+-/*----------------------------------------------------------------------*/
+-
+-static void dw_dma_off(struct dw_dma *dw)
+-{
+-      int i;
+-
+-      dma_writel(dw, CFG, 0);
+-
+-      channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+-      channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+-      channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+-      channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+-
+-      while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+-              cpu_relax();
+-
+-      for (i = 0; i < dw->dma.chancnt; i++)
+-              dw->chan[i].initialized = false;
+-}
+-
+-#ifdef CONFIG_OF
+-static struct dw_dma_platform_data *
+-dw_dma_parse_dt(struct platform_device *pdev)
+-{
+-      struct device_node *np = pdev->dev.of_node;
+-      struct dw_dma_platform_data *pdata;
+-      u32 tmp, arr[4];
+-
+-      if (!np) {
+-              dev_err(&pdev->dev, "Missing DT data\n");
+-              return NULL;
+-      }
+-
+-      pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+-      if (!pdata)
+-              return NULL;
+-
+-      if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
+-              return NULL;
+-
+-      if (of_property_read_bool(np, "is_private"))
+-              pdata->is_private = true;
+-
+-      if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+-              pdata->chan_allocation_order = (unsigned char)tmp;
+-
+-      if (!of_property_read_u32(np, "chan_priority", &tmp))
+-              pdata->chan_priority = tmp;
+-
+-      if (!of_property_read_u32(np, "block_size", &tmp))
+-              pdata->block_size = tmp;
+-
+-      if (!of_property_read_u32(np, "dma-masters", &tmp)) {
+-              if (tmp > 4)
+-                      return NULL;
+-
+-              pdata->nr_masters = tmp;
+-      }
+-
+-      if (!of_property_read_u32_array(np, "data_width", arr,
+-                              pdata->nr_masters))
+-              for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+-                      pdata->data_width[tmp] = arr[tmp];
+-
+-      return pdata;
+-}
+-#else
+-static inline struct dw_dma_platform_data *
+-dw_dma_parse_dt(struct platform_device *pdev)
+-{
+-      return NULL;
+-}
+-#endif
+-
+-static int dw_probe(struct platform_device *pdev)
+-{
+-      struct dw_dma_platform_data *pdata;
+-      struct resource         *io;
+-      struct dw_dma           *dw;
+-      size_t                  size;
+-      void __iomem            *regs;
+-      bool                    autocfg;
+-      unsigned int            dw_params;
+-      unsigned int            nr_channels;
+-      unsigned int            max_blk_size = 0;
+-      int                     irq;
+-      int                     err;
+-      int                     i;
+-
+-      irq = platform_get_irq(pdev, 0);
+-      if (irq < 0)
+-              return irq;
+-
+-      io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-      regs = devm_ioremap_resource(&pdev->dev, io);
+-      if (IS_ERR(regs))
+-              return PTR_ERR(regs);
+-
+-      /* Apply default dma_mask if needed */
+-      if (!pdev->dev.dma_mask) {
+-              pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+-              pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+-      }
+-
+-      dw_params = dma_read_byaddr(regs, DW_PARAMS);
+-      autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+-
+-      dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+-
+-      pdata = dev_get_platdata(&pdev->dev);
+-      if (!pdata)
+-              pdata = dw_dma_parse_dt(pdev);
+-
+-      if (!pdata && autocfg) {
+-              pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+-              if (!pdata)
+-                      return -ENOMEM;
+-
+-              /* Fill platform data with the default values */
+-              pdata->is_private = true;
+-              pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+-              pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+-      } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+-              return -EINVAL;
+-
+-      if (autocfg)
+-              nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
+-      else
+-              nr_channels = pdata->nr_channels;
+-
+-      size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
+-      dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+-      if (!dw)
+-              return -ENOMEM;
+-
+-      dw->clk = devm_clk_get(&pdev->dev, "hclk");
+-      if (IS_ERR(dw->clk))
+-              return PTR_ERR(dw->clk);
+-      clk_prepare_enable(dw->clk);
+-
+-      dw->regs = regs;
+-
+-      /* Get hardware configuration parameters */
+-      if (autocfg) {
+-              max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+-
+-              dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
+-              for (i = 0; i < dw->nr_masters; i++) {
+-                      dw->data_width[i] =
+-                              (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+-              }
+-      } else {
+-              dw->nr_masters = pdata->nr_masters;
+-              memcpy(dw->data_width, pdata->data_width, 4);
+-      }
+-
+-      /* Calculate all channel mask before DMA setup */
+-      dw->all_chan_mask = (1 << nr_channels) - 1;
+-
+-      /* Force dma off, just in case */
+-      dw_dma_off(dw);
+-
+-      /* Disable BLOCK interrupts as well */
+-      channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+-
+-      err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
+-                             "dw_dmac", dw);
+-      if (err)
+-              return err;
+-
+-      platform_set_drvdata(pdev, dw);
+-
+-      /* Create a pool of consistent memory blocks for hardware descriptors */
+-      dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+-                                       sizeof(struct dw_desc), 4, 0);
+-      if (!dw->desc_pool) {
+-              dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+-              return -ENOMEM;
+-      }
+-
+-      tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+-
+-      INIT_LIST_HEAD(&dw->dma.channels);
+-      for (i = 0; i < nr_channels; i++) {
+-              struct dw_dma_chan      *dwc = &dw->chan[i];
+-              int                     r = nr_channels - i - 1;
+-
+-              dwc->chan.device = &dw->dma;
+-              dma_cookie_init(&dwc->chan);
+-              if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+-                      list_add_tail(&dwc->chan.device_node,
+-                                      &dw->dma.channels);
+-              else
+-                      list_add(&dwc->chan.device_node, &dw->dma.channels);
+-
+-              /* 7 is highest priority & 0 is lowest. */
+-              if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+-                      dwc->priority = r;
+-              else
+-                      dwc->priority = i;
+-
+-              dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
+-              spin_lock_init(&dwc->lock);
+-              dwc->mask = 1 << i;
+-
+-              INIT_LIST_HEAD(&dwc->active_list);
+-              INIT_LIST_HEAD(&dwc->queue);
+-              INIT_LIST_HEAD(&dwc->free_list);
+-
+-              channel_clear_bit(dw, CH_EN, dwc->mask);
+-
+-              dwc->direction = DMA_TRANS_NONE;
+-              dwc->request_line = ~0;
+-
+-              /* Hardware configuration */
+-              if (autocfg) {
+-                      unsigned int dwc_params;
+-
+-                      dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
+-                                                   DWC_PARAMS);
+-
+-                      dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+-                                          dwc_params);
+-
+-                      /* Decode maximum block size for given channel. The
+-                       * stored 4 bit value represents blocks from 0x00 for 3
+-                       * up to 0x0a for 4095. */
+-                      dwc->block_size =
+-                              (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+-                      dwc->nollp =
+-                              (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
+-              } else {
+-                      dwc->block_size = pdata->block_size;
+-
+-                      /* Check if channel supports multi block transfer */
+-                      channel_writel(dwc, LLP, 0xfffffffc);
+-                      dwc->nollp =
+-                              (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
+-                      channel_writel(dwc, LLP, 0);
+-              }
+-      }
+-
+-      /* Clear all interrupts on all channels. */
+-      dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
+-      dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
+-      dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
+-      dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
+-      dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
+-
+-      dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+-      dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+-      if (pdata->is_private)
+-              dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
+-      dw->dma.dev = &pdev->dev;
+-      dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
+-      dw->dma.device_free_chan_resources = dwc_free_chan_resources;
+-
+-      dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
+-
+-      dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
+-      dw->dma.device_control = dwc_control;
+-
+-      dw->dma.device_tx_status = dwc_tx_status;
+-      dw->dma.device_issue_pending = dwc_issue_pending;
+-
+-      dma_writel(dw, CFG, DW_CFG_DMA_EN);
+-
+-      dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+-               nr_channels);
+-
+-      dma_async_device_register(&dw->dma);
+-
+-      if (pdev->dev.of_node) {
+-              err = of_dma_controller_register(pdev->dev.of_node,
+-                                               dw_dma_of_xlate, dw);
+-              if (err)
+-                      dev_err(&pdev->dev,
+-                              "could not register of_dma_controller\n");
+-      }
+-
+-      if (ACPI_HANDLE(&pdev->dev))
+-              dw_dma_acpi_controller_register(dw);
+-
+-      return 0;
+-}
+-
+-static int dw_remove(struct platform_device *pdev)
+-{
+-      struct dw_dma           *dw = platform_get_drvdata(pdev);
+-      struct dw_dma_chan      *dwc, *_dwc;
+-
+-      if (pdev->dev.of_node)
+-              of_dma_controller_free(pdev->dev.of_node);
+-      dw_dma_off(dw);
+-      dma_async_device_unregister(&dw->dma);
+-
+-      tasklet_kill(&dw->tasklet);
+-
+-      list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
+-                      chan.device_node) {
+-              list_del(&dwc->chan.device_node);
+-              channel_clear_bit(dw, CH_EN, dwc->mask);
+-      }
+-
+-      return 0;
+-}
+-
+-static void dw_shutdown(struct platform_device *pdev)
+-{
+-      struct dw_dma   *dw = platform_get_drvdata(pdev);
+-
+-      dw_dma_off(dw);
+-      clk_disable_unprepare(dw->clk);
+-}
+-
+-static int dw_suspend_noirq(struct device *dev)
+-{
+-      struct platform_device *pdev = to_platform_device(dev);
+-      struct dw_dma   *dw = platform_get_drvdata(pdev);
+-
+-      dw_dma_off(dw);
+-      clk_disable_unprepare(dw->clk);
+-
+-      return 0;
+-}
+-
+-static int dw_resume_noirq(struct device *dev)
+-{
+-      struct platform_device *pdev = to_platform_device(dev);
+-      struct dw_dma   *dw = platform_get_drvdata(pdev);
+-
+-      clk_prepare_enable(dw->clk);
+-      dma_writel(dw, CFG, DW_CFG_DMA_EN);
+-
+-      return 0;
+-}
+-
+-static const struct dev_pm_ops dw_dev_pm_ops = {
+-      .suspend_noirq = dw_suspend_noirq,
+-      .resume_noirq = dw_resume_noirq,
+-      .freeze_noirq = dw_suspend_noirq,
+-      .thaw_noirq = dw_resume_noirq,
+-      .restore_noirq = dw_resume_noirq,
+-      .poweroff_noirq = dw_suspend_noirq,
+-};
+-
+-#ifdef CONFIG_OF
+-static const struct of_device_id dw_dma_of_id_table[] = {
+-      { .compatible = "snps,dma-spear1340" },
+-      {}
+-};
+-MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
+-#endif
+-
+-#ifdef CONFIG_ACPI
+-static const struct acpi_device_id dw_dma_acpi_id_table[] = {
+-      { "INTL9C60", 0 },
+-      { }
+-};
+-#endif
+-
+-static struct platform_driver dw_driver = {
+-      .probe          = dw_probe,
+-      .remove         = dw_remove,
+-      .shutdown       = dw_shutdown,
+-      .driver = {
+-              .name   = "dw_dmac",
+-              .pm     = &dw_dev_pm_ops,
+-              .of_match_table = of_match_ptr(dw_dma_of_id_table),
+-              .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
+-      },
+-};
+-
+-static int __init dw_init(void)
+-{
+-      return platform_driver_register(&dw_driver);
+-}
+-subsys_initcall(dw_init);
+-
+-static void __exit dw_exit(void)
+-{
+-      platform_driver_unregister(&dw_driver);
+-}
+-module_exit(dw_exit);
+-
+-MODULE_LICENSE("GPL v2");
+-MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
+-MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+--- a/drivers/dma/dw_dmac_regs.h
++++ /dev/null
+@@ -1,311 +0,0 @@
+-/*
+- * Driver for the Synopsys DesignWare AHB DMA Controller
+- *
+- * Copyright (C) 2005-2007 Atmel Corporation
+- * Copyright (C) 2010-2011 ST Microelectronics
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/dmaengine.h>
+-#include <linux/dw_dmac.h>
+-
+-#define DW_DMA_MAX_NR_CHANNELS        8
+-#define DW_DMA_MAX_NR_REQUESTS        16
+-
+-/* flow controller */
+-enum dw_dma_fc {
+-      DW_DMA_FC_D_M2M,
+-      DW_DMA_FC_D_M2P,
+-      DW_DMA_FC_D_P2M,
+-      DW_DMA_FC_D_P2P,
+-      DW_DMA_FC_P_P2M,
+-      DW_DMA_FC_SP_P2P,
+-      DW_DMA_FC_P_M2P,
+-      DW_DMA_FC_DP_P2P,
+-};
+-
+-/*
+- * Redefine this macro to handle differences between 32- and 64-bit
+- * addressing, big vs. little endian, etc.
+- */
+-#define DW_REG(name)          u32 name; u32 __pad_##name
+-
+-/* Hardware register definitions. */
+-struct dw_dma_chan_regs {
+-      DW_REG(SAR);            /* Source Address Register */
+-      DW_REG(DAR);            /* Destination Address Register */
+-      DW_REG(LLP);            /* Linked List Pointer */
+-      u32     CTL_LO;         /* Control Register Low */
+-      u32     CTL_HI;         /* Control Register High */
+-      DW_REG(SSTAT);
+-      DW_REG(DSTAT);
+-      DW_REG(SSTATAR);
+-      DW_REG(DSTATAR);
+-      u32     CFG_LO;         /* Configuration Register Low */
+-      u32     CFG_HI;         /* Configuration Register High */
+-      DW_REG(SGR);
+-      DW_REG(DSR);
+-};
+-
+-struct dw_dma_irq_regs {
+-      DW_REG(XFER);
+-      DW_REG(BLOCK);
+-      DW_REG(SRC_TRAN);
+-      DW_REG(DST_TRAN);
+-      DW_REG(ERROR);
+-};
+-
+-struct dw_dma_regs {
+-      /* per-channel registers */
+-      struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
+-
+-      /* irq handling */
+-      struct dw_dma_irq_regs  RAW;            /* r */
+-      struct dw_dma_irq_regs  STATUS;         /* r (raw & mask) */
+-      struct dw_dma_irq_regs  MASK;           /* rw (set = irq enabled) */
+-      struct dw_dma_irq_regs  CLEAR;          /* w (ack, affects "raw") */
+-
+-      DW_REG(STATUS_INT);                     /* r */
+-
+-      /* software handshaking */
+-      DW_REG(REQ_SRC);
+-      DW_REG(REQ_DST);
+-      DW_REG(SGL_REQ_SRC);
+-      DW_REG(SGL_REQ_DST);
+-      DW_REG(LAST_SRC);
+-      DW_REG(LAST_DST);
+-
+-      /* miscellaneous */
+-      DW_REG(CFG);
+-      DW_REG(CH_EN);
+-      DW_REG(ID);
+-      DW_REG(TEST);
+-
+-      /* reserved */
+-      DW_REG(__reserved0);
+-      DW_REG(__reserved1);
+-
+-      /* optional encoded params, 0x3c8..0x3f7 */
+-      u32     __reserved;
+-
+-      /* per-channel configuration registers */
+-      u32     DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
+-      u32     MULTI_BLK_TYPE;
+-      u32     MAX_BLK_SIZE;
+-
+-      /* top-level parameters */
+-      u32     DW_PARAMS;
+-};
+-
+-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+-#define dma_readl_native ioread32be
+-#define dma_writel_native iowrite32be
+-#else
+-#define dma_readl_native readl
+-#define dma_writel_native writel
+-#endif
+-
+-/* To access the registers in early stage of probe */
+-#define dma_read_byaddr(addr, name) \
+-      dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
+-
+-/* Bitfields in DW_PARAMS */
+-#define DW_PARAMS_NR_CHAN     8               /* number of channels */
+-#define DW_PARAMS_NR_MASTER   11              /* number of AHB masters */
+-#define DW_PARAMS_DATA_WIDTH(n)       (15 + 2 * (n))
+-#define DW_PARAMS_DATA_WIDTH1 15              /* master 1 data width */
+-#define DW_PARAMS_DATA_WIDTH2 17              /* master 2 data width */
+-#define DW_PARAMS_DATA_WIDTH3 19              /* master 3 data width */
+-#define DW_PARAMS_DATA_WIDTH4 21              /* master 4 data width */
+-#define DW_PARAMS_EN          28              /* encoded parameters */
+-
+-/* Bitfields in DWC_PARAMS */
+-#define DWC_PARAMS_MBLK_EN    11              /* multi block transfer */
+-
+-/* Bitfields in CTL_LO */
+-#define DWC_CTLL_INT_EN               (1 << 0)        /* irqs enabled? */
+-#define DWC_CTLL_DST_WIDTH(n) ((n)<<1)        /* bytes per element */
+-#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
+-#define DWC_CTLL_DST_INC      (0<<7)          /* DAR update/not */
+-#define DWC_CTLL_DST_DEC      (1<<7)
+-#define DWC_CTLL_DST_FIX      (2<<7)
+-#define DWC_CTLL_SRC_INC      (0<<9)          /* SAR update/not */
+-#define DWC_CTLL_SRC_DEC      (1<<9)
+-#define DWC_CTLL_SRC_FIX      (2<<9)
+-#define DWC_CTLL_DST_MSIZE(n) ((n)<<11)       /* burst, #elements */
+-#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
+-#define DWC_CTLL_S_GATH_EN    (1 << 17)       /* src gather, !FIX */
+-#define DWC_CTLL_D_SCAT_EN    (1 << 18)       /* dst scatter, !FIX */
+-#define DWC_CTLL_FC(n)                ((n) << 20)
+-#define DWC_CTLL_FC_M2M               (0 << 20)       /* mem-to-mem */
+-#define DWC_CTLL_FC_M2P               (1 << 20)       /* mem-to-periph */
+-#define DWC_CTLL_FC_P2M               (2 << 20)       /* periph-to-mem */
+-#define DWC_CTLL_FC_P2P               (3 << 20)       /* periph-to-periph */
+-/* plus 4 transfer types for peripheral-as-flow-controller */
+-#define DWC_CTLL_DMS(n)               ((n)<<23)       /* dst master select */
+-#define DWC_CTLL_SMS(n)               ((n)<<25)       /* src master select */
+-#define DWC_CTLL_LLP_D_EN     (1 << 27)       /* dest block chain */
+-#define DWC_CTLL_LLP_S_EN     (1 << 28)       /* src block chain */
+-
+-/* Bitfields in CTL_HI */
+-#define DWC_CTLH_DONE         0x00001000
+-#define DWC_CTLH_BLOCK_TS_MASK        0x00000fff
+-
+-/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+-#define DWC_CFGL_CH_PRIOR_MASK        (0x7 << 5)      /* priority mask */
+-#define DWC_CFGL_CH_PRIOR(x)  ((x) << 5)      /* priority */
+-#define DWC_CFGL_CH_SUSP      (1 << 8)        /* pause xfer */
+-#define DWC_CFGL_FIFO_EMPTY   (1 << 9)        /* fifo empty */
+-#define DWC_CFGL_HS_DST               (1 << 10)       /* handshake w/dst */
+-#define DWC_CFGL_HS_SRC               (1 << 11)       /* handshake w/src */
+-#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
+-#define DWC_CFGL_RELOAD_SAR   (1 << 30)
+-#define DWC_CFGL_RELOAD_DAR   (1 << 31)
+-
+-/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+-#define DWC_CFGH_DS_UPD_EN    (1 << 5)
+-#define DWC_CFGH_SS_UPD_EN    (1 << 6)
+-
+-/* Bitfields in SGR */
+-#define DWC_SGR_SGI(x)                ((x) << 0)
+-#define DWC_SGR_SGC(x)                ((x) << 20)
+-
+-/* Bitfields in DSR */
+-#define DWC_DSR_DSI(x)                ((x) << 0)
+-#define DWC_DSR_DSC(x)                ((x) << 20)
+-
+-/* Bitfields in CFG */
+-#define DW_CFG_DMA_EN         (1 << 0)
+-
+-enum dw_dmac_flags {
+-      DW_DMA_IS_CYCLIC = 0,
+-      DW_DMA_IS_SOFT_LLP = 1,
+-};
+-
+-struct dw_dma_chan {
+-      struct dma_chan                 chan;
+-      void __iomem                    *ch_regs;
+-      u8                              mask;
+-      u8                              priority;
+-      enum dma_transfer_direction     direction;
+-      bool                            paused;
+-      bool                            initialized;
+-
+-      /* software emulation of the LLP transfers */
+-      struct list_head        *tx_node_active;
+-
+-      spinlock_t              lock;
+-
+-      /* these other elements are all protected by lock */
+-      unsigned long           flags;
+-      struct list_head        active_list;
+-      struct list_head        queue;
+-      struct list_head        free_list;
+-      u32                     residue;
+-      struct dw_cyclic_desc   *cdesc;
+-
+-      unsigned int            descs_allocated;
+-
+-      /* hardware configuration */
+-      unsigned int            block_size;
+-      bool                    nollp;
+-
+-      /* custom slave configuration */
+-      unsigned int            request_line;
+-      unsigned char           src_master;
+-      unsigned char           dst_master;
+-
+-      /* configuration passed via DMA_SLAVE_CONFIG */
+-      struct dma_slave_config dma_sconfig;
+-};
+-
+-static inline struct dw_dma_chan_regs __iomem *
+-__dwc_regs(struct dw_dma_chan *dwc)
+-{
+-      return dwc->ch_regs;
+-}
+-
+-#define channel_readl(dwc, name) \
+-      dma_readl_native(&(__dwc_regs(dwc)->name))
+-#define channel_writel(dwc, name, val) \
+-      dma_writel_native((val), &(__dwc_regs(dwc)->name))
+-
+-static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
+-{
+-      return container_of(chan, struct dw_dma_chan, chan);
+-}
+-
+-struct dw_dma {
+-      struct dma_device       dma;
+-      void __iomem            *regs;
+-      struct dma_pool         *desc_pool;
+-      struct tasklet_struct   tasklet;
+-      struct clk              *clk;
+-
+-      u8                      all_chan_mask;
+-
+-      /* hardware configuration */
+-      unsigned char           nr_masters;
+-      unsigned char           data_width[4];
+-
+-      struct dw_dma_chan      chan[0];
+-};
+-
+-static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
+-{
+-      return dw->regs;
+-}
+-
+-#define dma_readl(dw, name) \
+-      dma_readl_native(&(__dw_regs(dw)->name))
+-#define dma_writel(dw, name, val) \
+-      dma_writel_native((val), &(__dw_regs(dw)->name))
+-
+-#define channel_set_bit(dw, reg, mask) \
+-      dma_writel(dw, reg, ((mask) << 8) | (mask))
+-#define channel_clear_bit(dw, reg, mask) \
+-      dma_writel(dw, reg, ((mask) << 8) | 0)
+-
+-static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
+-{
+-      return container_of(ddev, struct dw_dma, dma);
+-}
+-
+-/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+-struct dw_lli {
+-      /* values that are not changed by hardware */
+-      u32             sar;
+-      u32             dar;
+-      u32             llp;            /* chain to next lli */
+-      u32             ctllo;
+-      /* values that may get written back: */
+-      u32             ctlhi;
+-      /* sstat and dstat can snapshot peripheral register state.
+-       * silicon config may discard either or both...
+-       */
+-      u32             sstat;
+-      u32             dstat;
+-};
+-
+-struct dw_desc {
+-      /* FIRST values the hardware uses */
+-      struct dw_lli                   lli;
+-
+-      /* THEN values for driver housekeeping */
+-      struct list_head                desc_node;
+-      struct list_head                tx_list;
+-      struct dma_async_tx_descriptor  txd;
+-      size_t                          len;
+-      size_t                          total_len;
+-};
+-
+-#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node)
+-
+-static inline struct dw_desc *
+-txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
+-{
+-      return container_of(txd, struct dw_desc, txd);
+-}
index ce4ffae998d92f9e1939889a5a341f20dcfc579d..6337ef344d4fa922aef8fe459b3194c964e82f5c 100644 (file)
@@ -11,38 +11,36 @@ Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
 (cherry picked from commit 6d4028c644edc0a2e4a8c948ebf81e8f2f09726e)
 Signed-off-by: Darren Hart <dvhart@linux.intel.com>
 ---
- drivers/i2c/busses/i2c-bfin-twi.c          | 9 +++++----
- drivers/i2c/busses/i2c-cbus-gpio.c         | 5 +++--
- drivers/i2c/busses/i2c-davinci.c           | 2 +-
- drivers/i2c/busses/i2c-gpio.c              | 6 +++---
- drivers/i2c/busses/i2c-imx.c               | 2 +-
- drivers/i2c/busses/i2c-mv64xxx.c           | 2 +-
- drivers/i2c/busses/i2c-nomadik.c           | 2 +-
- drivers/i2c/busses/i2c-nuc900.c            | 2 +-
- drivers/i2c/busses/i2c-ocores.c            | 2 +-
- drivers/i2c/busses/i2c-omap.c              | 2 +-
- drivers/i2c/busses/i2c-pca-platform.c      | 2 +-
- drivers/i2c/busses/i2c-powermac.c          | 2 +-
- drivers/i2c/busses/i2c-pxa.c               | 4 ++--
- drivers/i2c/busses/i2c-rcar.c              | 2 +-
- drivers/i2c/busses/i2c-s3c2410.c           | 2 +-
- drivers/i2c/busses/i2c-s6000.c             | 5 +++--
- drivers/i2c/busses/i2c-sh7760.c            | 2 +-
- drivers/i2c/busses/i2c-sh_mobile.c         | 2 +-
- drivers/i2c/busses/i2c-xiic.c              | 2 +-
- drivers/i2c/i2c-smbus.c                    | 2 +-
- drivers/i2c/muxes/i2c-arb-gpio-challenge.c | 2 +-
- drivers/i2c/muxes/i2c-mux-gpio.c           | 8 +++++---
- drivers/i2c/muxes/i2c-mux-pca9541.c        | 2 +-
- drivers/i2c/muxes/i2c-mux-pca954x.c        | 2 +-
- drivers/i2c/muxes/i2c-mux-pinctrl.c        | 2 +-
+ drivers/i2c/busses/i2c-bfin-twi.c          |    9 +++++----
+ drivers/i2c/busses/i2c-cbus-gpio.c         |    5 +++--
+ drivers/i2c/busses/i2c-davinci.c           |    2 +-
+ drivers/i2c/busses/i2c-gpio.c              |    6 +++---
+ drivers/i2c/busses/i2c-imx.c               |    2 +-
+ drivers/i2c/busses/i2c-mv64xxx.c           |    2 +-
+ drivers/i2c/busses/i2c-nomadik.c           |    2 +-
+ drivers/i2c/busses/i2c-nuc900.c            |    2 +-
+ drivers/i2c/busses/i2c-ocores.c            |    2 +-
+ drivers/i2c/busses/i2c-omap.c              |    2 +-
+ drivers/i2c/busses/i2c-pca-platform.c      |    2 +-
+ drivers/i2c/busses/i2c-powermac.c          |    2 +-
+ drivers/i2c/busses/i2c-pxa.c               |    4 ++--
+ drivers/i2c/busses/i2c-rcar.c              |    2 +-
+ drivers/i2c/busses/i2c-s3c2410.c           |    2 +-
+ drivers/i2c/busses/i2c-s6000.c             |    5 +++--
+ drivers/i2c/busses/i2c-sh7760.c            |    2 +-
+ drivers/i2c/busses/i2c-sh_mobile.c         |    2 +-
+ drivers/i2c/busses/i2c-xiic.c              |    2 +-
+ drivers/i2c/i2c-smbus.c                    |    2 +-
+ drivers/i2c/muxes/i2c-arb-gpio-challenge.c |    2 +-
+ drivers/i2c/muxes/i2c-mux-gpio.c           |    8 +++++---
+ drivers/i2c/muxes/i2c-mux-pca9541.c        |    2 +-
+ drivers/i2c/muxes/i2c-mux-pca954x.c        |    2 +-
+ drivers/i2c/muxes/i2c-mux-pinctrl.c        |    2 +-
  25 files changed, 40 insertions(+), 35 deletions(-)
 
-diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
-index 05080c449c6b..6b0c5969b503 100644
 --- a/drivers/i2c/busses/i2c-bfin-twi.c
 +++ b/drivers/i2c/busses/i2c-bfin-twi.c
-@@ -662,8 +662,9 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
+@@ -662,8 +662,9 @@ static int i2c_bfin_twi_probe(struct pla
        p_adap->timeout = 5 * HZ;
        p_adap->retries = 3;
  
@@ -63,7 +61,7 @@ index 05080c449c6b..6b0c5969b503 100644
  out_error_pin_mux:
        iounmap(iface->regs_base);
  out_error_ioremap:
-@@ -726,7 +727,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
+@@ -726,7 +727,7 @@ static int i2c_bfin_twi_remove(struct pl
  
        i2c_del_adapter(&(iface->adap));
        free_irq(iface->irq, iface);
@@ -72,11 +70,9 @@ index 05080c449c6b..6b0c5969b503 100644
        iounmap(iface->regs_base);
        kfree(iface);
  
-diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
-index 1be13ac11dc5..2d46f13adfdf 100644
 --- a/drivers/i2c/busses/i2c-cbus-gpio.c
 +++ b/drivers/i2c/busses/i2c-cbus-gpio.c
-@@ -233,8 +233,9 @@ static int cbus_i2c_probe(struct platform_device *pdev)
+@@ -233,8 +233,9 @@ static int cbus_i2c_probe(struct platfor
                chost->clk_gpio = of_get_gpio(dnode, 0);
                chost->dat_gpio = of_get_gpio(dnode, 1);
                chost->sel_gpio = of_get_gpio(dnode, 2);
@@ -88,11 +84,9 @@ index 1be13ac11dc5..2d46f13adfdf 100644
                chost->clk_gpio = pdata->clk_gpio;
                chost->dat_gpio = pdata->dat_gpio;
                chost->sel_gpio = pdata->sel_gpio;
-diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
-index fa556057d224..14c53cccdcf0 100644
 --- a/drivers/i2c/busses/i2c-davinci.c
 +++ b/drivers/i2c/busses/i2c-davinci.c
-@@ -665,7 +665,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
+@@ -665,7 +665,7 @@ static int davinci_i2c_probe(struct plat
  #endif
        dev->dev = &pdev->dev;
        dev->irq = irq->start;
@@ -101,11 +95,9 @@ index fa556057d224..14c53cccdcf0 100644
        platform_set_drvdata(pdev, dev);
  
        if (!dev->pdata && pdev->dev.of_node) {
-diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
-index bc6e139c6e7f..8cdb4f743e19 100644
 --- a/drivers/i2c/busses/i2c-gpio.c
 +++ b/drivers/i2c/busses/i2c-gpio.c
-@@ -137,9 +137,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
+@@ -137,9 +137,9 @@ static int i2c_gpio_probe(struct platfor
                if (ret)
                        return ret;
        } else {
@@ -117,7 +109,7 @@ index bc6e139c6e7f..8cdb4f743e19 100644
                sda_pin = pdata->sda_pin;
                scl_pin = pdata->scl_pin;
        }
-@@ -171,7 +171,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
+@@ -171,7 +171,7 @@ static int i2c_gpio_probe(struct platfor
                pdata->scl_pin = scl_pin;
                of_i2c_gpio_get_props(pdev->dev.of_node, pdata);
        } else {
@@ -126,11 +118,9 @@ index bc6e139c6e7f..8cdb4f743e19 100644
        }
  
        if (pdata->sda_is_open_drain) {
-diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
-index 9c0f8bda692a..a231d2fd91ce 100644
 --- a/drivers/i2c/busses/i2c-imx.c
 +++ b/drivers/i2c/busses/i2c-imx.c
-@@ -596,7 +596,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
+@@ -596,7 +596,7 @@ static int __init i2c_imx_probe(struct p
                                                           &pdev->dev);
        struct imx_i2c_struct *i2c_imx;
        struct resource *res;
@@ -139,8 +129,6 @@ index 9c0f8bda692a..a231d2fd91ce 100644
        void __iomem *base;
        int irq, ret;
        u32 bitrate;
-diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
-index b1f42bf40963..9cc361d19941 100644
 --- a/drivers/i2c/busses/i2c-mv64xxx.c
 +++ b/drivers/i2c/busses/i2c-mv64xxx.c
 @@ -618,7 +618,7 @@ static int
@@ -152,11 +140,9 @@ index b1f42bf40963..9cc361d19941 100644
        struct resource *r;
        int     rc;
  
-diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
-index 650293ff4d62..9eb5852512d4 100644
 --- a/drivers/i2c/busses/i2c-nomadik.c
 +++ b/drivers/i2c/busses/i2c-nomadik.c
-@@ -974,7 +974,7 @@ static atomic_t adapter_id = ATOMIC_INIT(0);
+@@ -974,7 +974,7 @@ static atomic_t adapter_id = ATOMIC_INIT
  static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
  {
        int ret = 0;
@@ -165,11 +151,9 @@ index 650293ff4d62..9eb5852512d4 100644
        struct device_node *np = adev->dev.of_node;
        struct nmk_i2c_dev      *dev;
        struct i2c_adapter *adap;
-diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
-index 865ee350adb3..36394d737faf 100644
 --- a/drivers/i2c/busses/i2c-nuc900.c
 +++ b/drivers/i2c/busses/i2c-nuc900.c
-@@ -525,7 +525,7 @@ static int nuc900_i2c_probe(struct platform_device *pdev)
+@@ -525,7 +525,7 @@ static int nuc900_i2c_probe(struct platf
        struct resource *res;
        int ret;
  
@@ -178,11 +162,9 @@ index 865ee350adb3..36394d737faf 100644
        if (!pdata) {
                dev_err(&pdev->dev, "no platform data\n");
                return -EINVAL;
-diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
-index 0e1f8245e768..289960812efc 100644
 --- a/drivers/i2c/busses/i2c-ocores.c
 +++ b/drivers/i2c/busses/i2c-ocores.c
-@@ -369,7 +369,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
+@@ -369,7 +369,7 @@ static int ocores_i2c_probe(struct platf
        if (IS_ERR(i2c->base))
                return PTR_ERR(i2c->base);
  
@@ -191,11 +173,9 @@ index 0e1f8245e768..289960812efc 100644
        if (pdata) {
                i2c->reg_shift = pdata->reg_shift;
                i2c->reg_io_width = pdata->reg_io_width;
-diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
-index aa77626f8315..9a844003696b 100644
 --- a/drivers/i2c/busses/i2c-omap.c
 +++ b/drivers/i2c/busses/i2c-omap.c
-@@ -1079,7 +1079,7 @@ omap_i2c_probe(struct platform_device *pdev)
+@@ -1079,7 +1079,7 @@ omap_i2c_probe(struct platform_device *p
        struct i2c_adapter      *adap;
        struct resource         *mem;
        const struct omap_i2c_bus_platform_data *pdata =
@@ -204,11 +184,9 @@ index aa77626f8315..9a844003696b 100644
        struct device_node      *node = pdev->dev.of_node;
        const struct of_device_id *match;
        int irq;
-diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
-index aa00df14e30b..39e2755e3f25 100644
 --- a/drivers/i2c/busses/i2c-pca-platform.c
 +++ b/drivers/i2c/busses/i2c-pca-platform.c
-@@ -136,7 +136,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
+@@ -136,7 +136,7 @@ static int i2c_pca_pf_probe(struct platf
        struct i2c_pca_pf_data *i2c;
        struct resource *res;
        struct i2c_pca9564_pf_platform_data *platform_data =
@@ -217,11 +195,9 @@ index aa00df14e30b..39e2755e3f25 100644
        int ret = 0;
        int irq;
  
-diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
-index 8dc90da1e6e6..5a88364a542b 100644
 --- a/drivers/i2c/busses/i2c-powermac.c
 +++ b/drivers/i2c/busses/i2c-powermac.c
-@@ -398,7 +398,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
+@@ -398,7 +398,7 @@ static void i2c_powermac_register_device
  
  static int i2c_powermac_probe(struct platform_device *dev)
  {
@@ -230,11 +206,9 @@ index 8dc90da1e6e6..5a88364a542b 100644
        struct device_node *parent = NULL;
        struct i2c_adapter *adapter;
        const char *basename;
-diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
-index ea6d45d1dcd6..5e8e04273b78 100644
 --- a/drivers/i2c/busses/i2c-pxa.c
 +++ b/drivers/i2c/busses/i2c-pxa.c
-@@ -1072,7 +1072,7 @@ static int i2c_pxa_probe_pdata(struct platform_device *pdev,
+@@ -1072,7 +1072,7 @@ static int i2c_pxa_probe_pdata(struct pl
                               struct pxa_i2c *i2c,
                               enum pxa_i2c_types *i2c_types)
  {
@@ -243,7 +217,7 @@ index ea6d45d1dcd6..5e8e04273b78 100644
        const struct platform_device_id *id = platform_get_device_id(pdev);
  
        *i2c_types = id->driver_data;
-@@ -1085,7 +1085,7 @@ static int i2c_pxa_probe_pdata(struct platform_device *pdev,
+@@ -1085,7 +1085,7 @@ static int i2c_pxa_probe_pdata(struct pl
  
  static int i2c_pxa_probe(struct platform_device *dev)
  {
@@ -252,11 +226,9 @@ index ea6d45d1dcd6..5e8e04273b78 100644
        enum pxa_i2c_types i2c_type;
        struct pxa_i2c *i2c;
        struct resource *res = NULL;
-diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
-index 0fc585861610..e59c3f618542 100644
 --- a/drivers/i2c/busses/i2c-rcar.c
 +++ b/drivers/i2c/busses/i2c-rcar.c
-@@ -615,7 +615,7 @@ static const struct i2c_algorithm rcar_i2c_algo = {
+@@ -650,7 +650,7 @@ MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids)
  
  static int rcar_i2c_probe(struct platform_device *pdev)
  {
@@ -265,11 +237,9 @@ index 0fc585861610..e59c3f618542 100644
        struct rcar_i2c_priv *priv;
        struct i2c_adapter *adap;
        struct resource *res;
-diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
-index cab1c91b75a3..0a077b1ef94f 100644
 --- a/drivers/i2c/busses/i2c-s3c2410.c
 +++ b/drivers/i2c/busses/i2c-s3c2410.c
-@@ -1033,7 +1033,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+@@ -1033,7 +1033,7 @@ static int s3c24xx_i2c_probe(struct plat
        int ret;
  
        if (!pdev->dev.of_node) {
@@ -278,11 +248,9 @@ index cab1c91b75a3..0a077b1ef94f 100644
                if (!pdata) {
                        dev_err(&pdev->dev, "no platform data\n");
                        return -EINVAL;
-diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
-index 7c1ca5aca088..dd186a037684 100644
 --- a/drivers/i2c/busses/i2c-s6000.c
 +++ b/drivers/i2c/busses/i2c-s6000.c
-@@ -290,8 +290,9 @@ static int s6i2c_probe(struct platform_device *dev)
+@@ -290,8 +290,9 @@ static int s6i2c_probe(struct platform_d
  
        clock = 0;
        bus_num = -1;
@@ -294,11 +262,9 @@ index 7c1ca5aca088..dd186a037684 100644
                bus_num = pdata->bus_num;
                clock = pdata->clock;
        }
-diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
-index 5351a2f34912..5e8f136e233f 100644
 --- a/drivers/i2c/busses/i2c-sh7760.c
 +++ b/drivers/i2c/busses/i2c-sh7760.c
-@@ -437,7 +437,7 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
+@@ -437,7 +437,7 @@ static int sh7760_i2c_probe(struct platf
        struct cami2c *id;
        int ret;
  
@@ -307,11 +273,9 @@ index 5351a2f34912..5e8f136e233f 100644
        if (!pd) {
                dev_err(&pdev->dev, "no platform_data!\n");
                ret = -ENODEV;
-diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
-index debf745c0268..4e86a3190d46 100644
 --- a/drivers/i2c/busses/i2c-sh_mobile.c
 +++ b/drivers/i2c/busses/i2c-sh_mobile.c
-@@ -658,7 +658,7 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
+@@ -658,7 +658,7 @@ static int sh_mobile_i2c_hook_irqs(struc
  
  static int sh_mobile_i2c_probe(struct platform_device *dev)
  {
@@ -320,11 +284,9 @@ index debf745c0268..4e86a3190d46 100644
        struct sh_mobile_i2c_data *pd;
        struct i2c_adapter *adap;
        struct resource *res;
-diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
-index 3d0f0520c1b4..433f377b3869 100644
 --- a/drivers/i2c/busses/i2c-xiic.c
 +++ b/drivers/i2c/busses/i2c-xiic.c
-@@ -703,7 +703,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
+@@ -703,7 +703,7 @@ static int xiic_i2c_probe(struct platfor
        if (irq < 0)
                goto resource_missing;
  
@@ -333,11 +295,9 @@ index 3d0f0520c1b4..433f377b3869 100644
  
        i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
        if (!i2c)
-diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
-index 92cdd2323b03..44d4c6071c15 100644
 --- a/drivers/i2c/i2c-smbus.c
 +++ b/drivers/i2c/i2c-smbus.c
-@@ -137,7 +137,7 @@ static irqreturn_t smbalert_irq(int irq, void *d)
+@@ -137,7 +137,7 @@ static irqreturn_t smbalert_irq(int irq,
  static int smbalert_probe(struct i2c_client *ara,
                          const struct i2c_device_id *id)
  {
@@ -346,11 +306,9 @@ index 92cdd2323b03..44d4c6071c15 100644
        struct i2c_smbus_alert *alert;
        struct i2c_adapter *adapter = ara->adapter;
        int res;
-diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
-index 210b6f7b9028..f7bf24375f81 100644
 --- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
 +++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
-@@ -131,7 +131,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
+@@ -131,7 +131,7 @@ static int i2c_arbitrator_probe(struct p
                dev_err(dev, "Cannot find device tree node\n");
                return -ENODEV;
        }
@@ -359,11 +317,9 @@ index 210b6f7b9028..f7bf24375f81 100644
                dev_err(dev, "Platform data is not supported\n");
                return -EINVAL;
        }
-diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
-index bb4f69f75f3c..774b9cc4601c 100644
 --- a/drivers/i2c/muxes/i2c-mux-gpio.c
 +++ b/drivers/i2c/muxes/i2c-mux-gpio.c
-@@ -148,12 +148,14 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
+@@ -148,12 +148,14 @@ static int i2c_mux_gpio_probe(struct pla
  
        platform_set_drvdata(pdev, mux);
  
@@ -381,11 +337,9 @@ index bb4f69f75f3c..774b9cc4601c 100644
  
        /*
         * If a GPIO chip name is provided, the GPIO pin numbers provided are
-diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
-index 966a18a5d12d..c4f08ad31183 100644
 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c
 +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
-@@ -324,7 +324,7 @@ static int pca9541_probe(struct i2c_client *client,
+@@ -324,7 +324,7 @@ static int pca9541_probe(struct i2c_clie
                         const struct i2c_device_id *id)
  {
        struct i2c_adapter *adap = client->adapter;
@@ -394,11 +348,9 @@ index 966a18a5d12d..c4f08ad31183 100644
        struct pca9541 *data;
        int force;
        int ret = -ENODEV;
-diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
-index a531d801dbe4..bad5b84a5985 100644
 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c
 +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
-@@ -185,7 +185,7 @@ static int pca954x_probe(struct i2c_client *client,
+@@ -185,7 +185,7 @@ static int pca954x_probe(struct i2c_clie
                         const struct i2c_device_id *id)
  {
        struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
@@ -407,11 +359,9 @@ index a531d801dbe4..bad5b84a5985 100644
        int num, force, class;
        struct pca954x *data;
        int ret = -ENODEV;
-diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
-index a43c0ce5e3d8..0d082027c29a 100644
 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
 +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
-@@ -145,7 +145,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
+@@ -145,7 +145,7 @@ static int i2c_mux_pinctrl_probe(struct
  
        mux->dev = &pdev->dev;
  
@@ -420,6 +370,3 @@ index a43c0ce5e3d8..0d082027c29a 100644
        if (!mux->pdata) {
                ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
                if (ret < 0)
--- 
-1.8.5.rc3
-
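
All of the i2c hunks above perform the same mechanical substitution from the
dev_get_platdata refresh: direct reads of pdev->dev.platform_data (or
client->dev.platform_data) become calls to the dev_get_platdata() accessor
declared in <linux/device.h>. A minimal sketch of the resulting pattern,
using a hypothetical "foo" driver and platform-data struct that appear in
none of these patches:

	#include <linux/device.h>
	#include <linux/platform_device.h>

	/* Hypothetical board-supplied configuration, for illustration only. */
	struct foo_platform_data {
		int bus_num;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		/*
		 * Before the conversion this dereferenced
		 * pdev->dev.platform_data directly; the accessor keeps the
		 * same NULL-when-absent semantics.
		 */
		struct foo_platform_data *pdata = dev_get_platdata(&pdev->dev);

		if (!pdata)
			return -EINVAL;	/* no board data supplied */

		return 0;
	}

The accessor takes a struct device *, hence the &pdev->dev (or &client->dev
for i2c client drivers) seen throughout the hunks above.
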
index c55867de3b5169eb61e1a7ff074df73faa9f5a8f..7a3e4605a543af312fbdbe0bb054e16ad68afd53 100644 (file)
@@ -17,12 +17,10 @@ Signed-off-by: Kukjin Kim <kgene.kim@samsung.com>
 (cherry picked from commit 83978253d0c3e12bf81d4b5f419a0200d5cb19a6)
 Signed-off-by: Darren Hart <dvhart@linux.intel.com>
 ---
- arch/arm/mach-exynos/Kconfig | 9 ++++++---
- drivers/pinctrl/Kconfig      | 5 +++--
+ arch/arm/mach-exynos/Kconfig |    9 ++++++---
+ drivers/pinctrl/Kconfig      |    5 +++--
  2 files changed, 9 insertions(+), 5 deletions(-)
 
-diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
-index ff18fc2ea46f..144fcc231074 100644
 --- a/arch/arm/mach-exynos/Kconfig
 +++ b/arch/arm/mach-exynos/Kconfig
 @@ -17,6 +17,7 @@ config ARCH_EXYNOS4
@@ -75,7 +73,7 @@ index ff18fc2ea46f..144fcc231074 100644
        select S5P_SLEEP if PM
 @@ -78,7 +84,6 @@ config SOC_EXYNOS5440
        select ARCH_HAS_OPP
-       select ARM_ARCH_TIMER
+       select HAVE_ARM_ARCH_TIMER
        select AUTO_ZRELADDR
 -      select PINCTRL
        select PINCTRL_EXYNOS5440
@@ -90,8 +88,6 @@ index ff18fc2ea46f..144fcc231074 100644
        select S5P_DEV_MFC
        select USE_OF
        help
-diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
-index adeb1fed04cc..6585b37bbdbc 100644
 --- a/drivers/pinctrl/Kconfig
 +++ b/drivers/pinctrl/Kconfig
 @@ -219,12 +219,13 @@ config PINCTRL_SAMSUNG
@@ -110,6 +106,3 @@ index adeb1fed04cc..6585b37bbdbc 100644
        select PINMUX
        select PINCONF
  
--- 
-1.8.5.rc3
-
index d099591d9c31cace219b04a11572f47db93373a7..286cad27debdf28dffb8e0e9710589e5c8d720fb 100644 (file)
@@ -12,18 +12,16 @@ Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 Signed-off-by: Daniel Sangorrin <daniel.sangorrin@toshiba.co.jp>
 Signed-off-by: Yoshitake Kobayashi <yoshitake.kobayashi@toshiba.co.jp>
 ---
- MAINTAINERS                    |  1 +
- drivers/cpuidle/Kconfig        |  6 +++
- drivers/cpuidle/Makefile       |  1 +
- drivers/cpuidle/cpuidle-zynq.c | 83 ++++++++++++++++++++++++++++++++++++++++++
+ MAINTAINERS                    |    1 
+ drivers/cpuidle/Kconfig        |    6 ++
+ drivers/cpuidle/Makefile       |    1 
+ drivers/cpuidle/cpuidle-zynq.c |   83 +++++++++++++++++++++++++++++++++++++++++
  4 files changed, 91 insertions(+)
  create mode 100644 drivers/cpuidle/cpuidle-zynq.c
 
-diff --git a/MAINTAINERS b/MAINTAINERS
-index 48c748080c96..30f6d87daadd 100644
 --- a/MAINTAINERS
 +++ b/MAINTAINERS
-@@ -1309,6 +1309,7 @@ W:       http://wiki.xilinx.com
+@@ -1310,6 +1310,7 @@ W:       http://wiki.xilinx.com
  T:    git git://git.xilinx.com/linux-xlnx.git
  S:    Supported
  F:    arch/arm/mach-zynq/
@@ -31,8 +29,6 @@ index 48c748080c96..30f6d87daadd 100644
  
  ARM64 PORT (AARCH64 ARCHITECTURE)
  M:    Catalin Marinas <catalin.marinas@arm.com>
-diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
-index c4cc27e5c8a5..8272a08b137b 100644
 --- a/drivers/cpuidle/Kconfig
 +++ b/drivers/cpuidle/Kconfig
 @@ -39,4 +39,10 @@ config CPU_IDLE_CALXEDA
@@ -46,18 +42,13 @@ index c4cc27e5c8a5..8272a08b137b 100644
 +        Select this to enable cpuidle on Xilinx Zynq processors.
 +
  endif
-diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
-index 0d8bd55e776f..8767a7b3eb91 100644
 --- a/drivers/cpuidle/Makefile
 +++ b/drivers/cpuidle/Makefile
-@@ -7,3 +7,4 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
+@@ -7,3 +7,4 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
  
  obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
  obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
 +obj-$(CONFIG_CPU_IDLE_ZYNQ) += cpuidle-zynq.o
-diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
-new file mode 100644
-index 000000000000..38e03a183591
 --- /dev/null
 +++ b/drivers/cpuidle/cpuidle-zynq.c
 @@ -0,0 +1,83 @@
@@ -144,6 +135,3 @@ index 000000000000..38e03a183591
 +}
 +
 +device_initcall(zynq_cpuidle_init);
--- 
-1.8.5.rc3
-
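
The body of the new drivers/cpuidle/cpuidle-zynq.c is mostly elided in the
refresh above; only its closing lines are visible. For orientation, a minimal
sketch of a registration-only ARM cpuidle driver of the same general shape,
where the "foo" naming and the single-WFI-state setup are assumptions for
illustration rather than content taken from the patch:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/cpuidle.h>
	#include <asm/cpuidle.h>

	static struct cpuidle_driver foo_idle_driver = {
		.name = "foo_idle",
		.owner = THIS_MODULE,
		.states = {
			ARM_CPUIDLE_WFI_STATE,	/* state 0: plain wait-for-interrupt */
		},
		.state_count = 1,
	};

	static int __init foo_cpuidle_init(void)
	{
		/* Registers the driver plus a cpuidle device per possible CPU. */
		return cpuidle_register(&foo_idle_driver, NULL);
	}
	device_initcall(foo_cpuidle_init);

The device_initcall() here matches the tail of the new file visible above;
it keeps the driver built-in-only, which suits an SoC idle driver.
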