drm: switch possible crtc/clones over to encoders
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 4825f0c..dbb3157 100644
@@ -1,5 +1,5 @@
 /**************************************************************************
- * 
+ *
  * This kernel module is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation; either version 2 of the
@@ -13,7 +13,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * 
+ *
  **************************************************************************/
 /*
  * This code provides access to unexported mm kernel features. It is necessary
@@ -21,7 +21,7 @@
  * directly.
  *
  * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- *          Linux kernel mm subsystem authors. 
+ *          Linux kernel mm subsystem authors.
  *          (Most code taken from there).
  */
 
@@ -50,7 +50,7 @@ int drm_unmap_page_from_agp(struct page *page)
          * performance reasons */
         return i;
 }
-#endif 
+#endif
 
 
 #if  (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
@@ -80,20 +80,25 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 
 /*
  * vm code for kernels below 2.6.15 in which version a major vm write
- * occured. This implement a simple straightforward 
+ * occurred. This implements a simple, straightforward
  * version similar to what's going to be
  * in kernel 2.6.19+
  * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
  * nopfn.
- */ 
+ */
 
 static struct {
        spinlock_t lock;
        struct page *dummy_page;
        atomic_t present;
-} drm_np_retry = 
+} drm_np_retry =
 {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
 
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+                                   struct fault_data *data);
+
+
 struct page * get_nopage_retry(void)
 {
        if (atomic_read(&drm_np_retry.present) == 0) {
@@ -121,7 +126,7 @@ void free_nopage_retry(void)
 }
 
 struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
-                              unsigned long address, 
+                              unsigned long address,
                               int *type)
 {
        struct fault_data data;
@@ -180,7 +185,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma,
        return ret;
 }
 
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                  unsigned long pfn)
 {
        int ret;
@@ -190,14 +195,112 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
        return ret;
 }
+
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+                                   struct fault_data *data)
+{
+       unsigned long address = data->address;
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+       unsigned long page_offset;
+       struct page *page = NULL;
+       struct drm_ttm *ttm;
+       struct drm_device *dev;
+       unsigned long pfn;
+       int err;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+
+       dev = bo->dev;
+       drm_bo_read_lock(&dev->bm.bm_lock, 0);
+
+       mutex_lock(&bo->mutex);
+
+       err = drm_bo_wait(bo, 0, 1, 0, 1);
+       if (err) {
+               data->type = (err == -EAGAIN) ?
+                       VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+
+       /*
+        * If buffer happens to be in a non-mappable location,
+        * move it to a mappable.
+        */
+
+       if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+               unsigned long _end = jiffies + 3*DRM_HZ;
+               uint32_t new_mask = bo->mem.proposed_flags |
+                       DRM_BO_FLAG_MAPPABLE |
+                       DRM_BO_FLAG_FORCE_MAPPABLE;
+
+               do {
+                       err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+               } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
+
+               if (err) {
+                       DRM_ERROR("Timeout moving buffer to mappable location.\n");
+                       data->type = VM_FAULT_SIGBUS;
+                       goto out_unlock;
+               }
+       }
+
+       if (address > vma->vm_end) {
+               data->type = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       dev = bo->dev;
+       err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+                               &bus_size);
+
+       if (err) {
+               data->type = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+       if (bus_size) {
+               struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
+
+               pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+               vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+       } else {
+               ttm = bo->ttm;
+
+               drm_ttm_fixup_caching(ttm);
+               page = drm_ttm_get_page(ttm, page_offset);
+               if (!page) {
+                       data->type = VM_FAULT_OOM;
+                       goto out_unlock;
+               }
+               pfn = page_to_pfn(page);
+               vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
+                       vm_get_page_prot(vma->vm_flags) :
+                       drm_io_prot(_DRM_TTM, vma);
+       }
+
+       err = vm_insert_pfn(vma, address, pfn);
+
+       if (!err || err == -EBUSY)
+               data->type = VM_FAULT_MINOR;
+       else
+               data->type = VM_FAULT_OOM;
+out_unlock:
+       mutex_unlock(&bo->mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
+       return NULL;
+}
+
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+  !defined(DRM_FULL_MM_COMPAT)
 
 /**
- * While waiting for the fault() handler to appear in
- * we accomplish approximately
- * the same wrapping it with nopfn.
  */
 
 unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
@@ -227,7 +330,7 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
  * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
  * workaround for a single BUG statement in do_no_page in these versions. The
  * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
- * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to 
+ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
  * first take the dev->struct_mutex, and then trylock all mmap_sems. If this
  * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
  * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
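
A hedged sketch of the lock-all-or-back-off pattern the comment above describes, not taken from this patch; the dev and bo variables and the surrounding caller are assumptions:

        int ret;

        do {
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_lock_kmm(bo);      /* trylock every mmap_sem tracked for this bo */
                if (ret == -EAGAIN) {
                        /* a trylock failed: drop everything, yield the cpu, retry */
                        mutex_unlock(&dev->struct_mutex);
                        schedule();
                }
        } while (ret == -EAGAIN);

        /* ... unmap work runs here with all mmap_sems held ... */
        drm_bo_unlock_kmm(bo);
        mutex_unlock(&dev->struct_mutex);
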
@@ -248,14 +351,14 @@ typedef struct vma_entry {
 
 
 struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
-                              unsigned long address, 
+                              unsigned long address,
                               int *type)
 {
-       drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page;
-       drm_ttm_t *ttm; 
-       drm_device_t *dev;
+       struct drm_ttm *ttm;
+       struct drm_device *dev;
 
        mutex_lock(&bo->mutex);
 
@@ -266,7 +369,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }
-       
+
        dev = bo->dev;
 
        if (drm_mem_reg_is_pci(dev, &bo->mem)) {
@@ -295,18 +398,18 @@ out_unlock:
 
 int drm_bo_map_bound(struct vm_area_struct *vma)
 {
-       drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
+       struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
        int ret = 0;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;
-       
-       ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, 
+
+       ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);
        BUG_ON(ret);
 
        if (bus_size) {
-               drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
+               struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
                unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
                pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
                ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
@@ -316,9 +419,9 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
 
        return ret;
 }
-       
 
-int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
+
+int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
 {
        p_mm_entry_t *entry, *n_entry;
        vma_entry_t *v_entry;
@@ -354,7 +457,7 @@ int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
        return 0;
 }
 
-void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
+void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
 {
        p_mm_entry_t *entry, *n;
        vma_entry_t *v_entry, *v_n;
@@ -386,11 +489,11 @@ void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
 
 
 
-int drm_bo_lock_kmm(drm_buffer_object_t * bo)
+int drm_bo_lock_kmm(struct drm_buffer_object * bo)
 {
        p_mm_entry_t *entry;
        int lock_ok = 1;
-       
+
        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(entry->locked);
                if (!down_write_trylock(&entry->mm->mmap_sem)) {
@@ -404,7 +507,7 @@ int drm_bo_lock_kmm(drm_buffer_object_t * bo)
                return 0;
 
        list_for_each_entry(entry, &bo->p_mm_list, head) {
-               if (!entry->locked) 
+               if (!entry->locked)
                        break;
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
@@ -418,10 +521,10 @@ int drm_bo_lock_kmm(drm_buffer_object_t * bo)
        return -EAGAIN;
 }
 
-void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
+void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
 {
        p_mm_entry_t *entry;
-       
+
        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(!entry->locked);
                up_write(&entry->mm->mmap_sem);
@@ -429,7 +532,7 @@ void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
        }
 }
 
-int drm_bo_remap_bound(drm_buffer_object_t *bo) 
+int drm_bo_remap_bound(struct drm_buffer_object *bo)
 {
        vma_entry_t *v_entry;
        int ret = 0;
@@ -445,14 +548,263 @@ int drm_bo_remap_bound(drm_buffer_object_t *bo)
        return ret;
 }
 
-void drm_bo_finish_unmap(drm_buffer_object_t *bo)
+void drm_bo_finish_unmap(struct drm_buffer_object *bo)
 {
        vma_entry_t *v_entry;
 
        list_for_each_entry(v_entry, &bo->vma_list, head) {
-               v_entry->vma->vm_flags &= ~VM_PFNMAP; 
+               v_entry->vma->vm_flags &= ~VM_PFNMAP;
+       }
+}
+
+#endif
+
+#ifdef DRM_IDR_COMPAT_FN
+/* only called when idp->lock is held */
+static void __free_layer(struct idr *idp, struct idr_layer *p)
+{
+       p->ary[0] = idp->id_free;
+       idp->id_free = p;
+       idp->id_free_cnt++;
+}
+
+static void free_layer(struct idr *idp, struct idr_layer *p)
+{
+       unsigned long flags;
+
+       /*
+        * Depends on the return element being zeroed.
+        */
+       spin_lock_irqsave(&idp->lock, flags);
+       __free_layer(idp, p);
+       spin_unlock_irqrestore(&idp->lock, flags);
+}
+
+/**
+ * idr_for_each - iterate through all stored pointers
+ * @idp: idr handle
+ * @fn: function to be called for each pointer
+ * @data: data passed back to callback function
+ *
+ * Iterate over the pointers registered with the given idr.  The
+ * callback function will be called for each pointer currently
+ * registered, passing the id, the pointer and the data pointer passed
+ * to this function.  It is not safe to modify the idr tree while in
+ * the callback, so functions such as idr_get_new and idr_remove are
+ * not allowed.
+ *
+ * We check the return of @fn each time. If it returns anything other
+ * than 0, we break out and return that value.
+ *
+ * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ */
+int idr_for_each(struct idr *idp,
+                int (*fn)(int id, void *p, void *data), void *data)
+{
+       int n, id, max, error = 0;
+       struct idr_layer *p;
+       struct idr_layer *pa[MAX_LEVEL];
+       struct idr_layer **paa = &pa[0];
+
+       n = idp->layers * IDR_BITS;
+       p = idp->top;
+       max = 1 << n;
+
+       id = 0;
+       while (id < max) {
+               while (n > 0 && p) {
+                       n -= IDR_BITS;
+                       *paa++ = p;
+                       p = p->ary[(id >> n) & IDR_MASK];
+               }
+
+               if (p) {
+                       error = fn(id, (void *)p, data);
+                       if (error)
+                               break;
+               }
+
+               id += 1 << n;
+               while (n < fls(id)) {
+                       n += IDR_BITS;
+                       p = *--paa;
+               }
+       }
+
+       return error;
+}
+EXPORT_SYMBOL(idr_for_each);
+
+/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * idr_destroy() only frees up unused, cached idp_layers, but this
+ * function will remove all id mappings and leave all idp_layers
+ * unused.
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will
+ * use idr_for_each() to free all objects, if necessary, then
+ * idr_remove_all() to remove all ids, and idr_destroy() to free
+ * up the cached idr_layers.
+ */
+void idr_remove_all(struct idr *idp)
+{
+       int n, id, max, error = 0;
+       struct idr_layer *p;
+       struct idr_layer *pa[MAX_LEVEL];
+       struct idr_layer **paa = &pa[0];
+
+       n = idp->layers * IDR_BITS;
+       p = idp->top;
+       max = 1 << n;
+
+       id = 0;
+       while (id < max && !error) {
+               while (n > IDR_BITS && p) {
+                       n -= IDR_BITS;
+                       *paa++ = p;
+                       p = p->ary[(id >> n) & IDR_MASK];
+               }
+
+               id += 1 << n;
+               while (n < fls(id)) {
+                       if (p) {
+                               memset(p, 0, sizeof *p);
+                               free_layer(idp, p);
+                       }
+                       n += IDR_BITS;
+                       p = *--paa;
+               }
+       }
+       idp->top = NULL;
+       idp->layers = 0;
+}
+EXPORT_SYMBOL(idr_remove_all);
+
+#endif /* DRM_IDR_COMPAT_FN */
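
To illustrate the "typical clean-up sequence" mentioned in the idr_remove_all() comment above, here is a hedged sketch that is not part of this patch; drm_free_one(), drm_release_objects() and the object_idr field are made-up names:

        static int drm_free_one(int id, void *ptr, void *data)
        {
                kfree(ptr);     /* stand-in for the real per-object destructor */
                return 0;       /* a non-zero return would stop the walk */
        }

        static void drm_release_objects(struct drm_device *dev)
        {
                idr_for_each(&dev->object_idr, drm_free_one, dev);  /* free every object */
                idr_remove_all(&dev->object_idr);                   /* drop all id mappings */
                idr_destroy(&dev->object_idr);                      /* free cached idr_layers */
        }
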
+
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+/**
+ * idr_replace - replace pointer for given id
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: lookup key
+ *
+ * Replace the pointer registered with an id and return the old value.
+ * A -ENOENT return indicates that @id was not found.
+ * A -EINVAL return indicates that @id was not within valid constraints.
+ *
+ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ */
+void *idr_replace(struct idr *idp, void *ptr, int id)
+{
+       int n;
+       struct idr_layer *p, *old_p;
+
+       n = idp->layers * IDR_BITS;
+       p = idp->top;
+
+       id &= MAX_ID_MASK;
+
+       if (id >= (1 << n))
+               return ERR_PTR(-EINVAL);
+
+       n -= IDR_BITS;
+       while ((n > 0) && p) {
+               p = p->ary[(id >> n) & IDR_MASK];
+               n -= IDR_BITS;
+       }
+
+       n = id & IDR_MASK;
+       if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+               return ERR_PTR(-ENOENT);
+
+       old_p = p->ary[n];
+       p->ary[n] = ptr;
+
+       return (void *)old_p;
+}
+EXPORT_SYMBOL(idr_replace);
+#endif
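
A minimal usage sketch for idr_replace(), not from this patch (dev->object_idr, new_obj and handle are assumed names), showing the ERR_PTR-style error handling implied above:

        void *old;

        old = idr_replace(&dev->object_idr, new_obj, handle);
        if (IS_ERR(old))
                return PTR_ERR(old);    /* -EINVAL: id out of range, -ENOENT: id not in use */
        /* 'old' is the previously registered pointer; the caller still owns it */
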
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+static __inline__ unsigned long __round_jiffies(unsigned long j, int cpu)
+{
+       int rem;
+       unsigned long original = j;
+
+       j += cpu * 3;
+
+       rem = j % HZ;
+
+       if (rem < HZ/4) /* round down */
+               j = j - rem;
+       else /* round up */
+               j = j - rem + HZ;
+
+       /* now that we have rounded, subtract the extra skew again */
+       j -= cpu * 3;
+
+       if (j <= jiffies) /* rounding ate our timeout entirely; */
+               return original;
+       return j;
+}
+
+static __inline__ unsigned long __round_jiffies_relative(unsigned long j, int cpu)
+{
+       return  __round_jiffies(j + jiffies, cpu) - jiffies;
+}
+
+unsigned long round_jiffies_relative(unsigned long j)
+{
+       return __round_jiffies_relative(j, raw_smp_processor_id());
+}
+EXPORT_SYMBOL(round_jiffies_relative);
+#endif
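
For context, the usual point of round_jiffies_relative() is to let unrelated timers expire on the same rounded boundary so the CPU wakes up once instead of several times; a short example with an assumed timer:

        /* re-arm roughly one second out, rounded so wakeups can batch */
        mod_timer(&my_timer, jiffies + round_jiffies_relative(HZ));
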
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
+{
+    struct pci_dev *dev = NULL;
+
+    while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+        if (pci_domain_nr(dev->bus) == 0 &&
+           (dev->bus->number == bus && dev->devfn == devfn))
+            return dev;
+   }
+   return NULL;
+}
+EXPORT_SYMBOL(pci_get_bus_and_slot);
+#endif
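
A usage sketch for the pci_get_bus_and_slot() fallback above (illustrative only; the device address is an example), looking up the device at bus 0, slot 0, function 0:

        struct pci_dev *bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));

        if (bridge) {
                /* ... read config space, etc. ... */
                pci_dev_put(bridge);    /* drop the reference taken by the lookup */
        }
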
+
+#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
+#define drm_kmap_get_fixmap_pte(vaddr)                                 \
+       pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
+
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
+                          pgprot_t protection)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+       static pte_t *km_pte;
+       static int initialized = 0;
+
+       if (unlikely(!initialized)) {
+               km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
+               initialized = 1;
        }
-}      
 
+       pagefault_disable();
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       set_pte(km_pte-idx, pfn_pte(pfn, protection));
+
+       return (void*) vaddr;
+}
+
+EXPORT_SYMBOL(kmap_atomic_prot_pfn);
 #endif
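
Finally, a hedged usage sketch for kmap_atomic_prot_pfn(), not part of this patch (pfn and src are placeholders): the helper disables pagefaults itself, so the matching kunmap_atomic() on the same KM slot re-enables them on these kernels:

        void *vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL);

        memcpy(vaddr, src, PAGE_SIZE);  /* write through the temporary mapping */
        kunmap_atomic(vaddr, KM_USER0);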