Lindent.
author	Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Tue, 17 Oct 2006 17:57:06 +0000 (19:57 +0200)
committer	Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Tue, 17 Oct 2006 17:57:06 +0000 (19:57 +0200)
linux-core/drm_bo.c
linux-core/drm_fence.c
linux-core/drm_hashtab.c
linux-core/drm_mm.c
linux-core/drm_ttm.c
linux-core/drm_ttm.h
linux-core/i915_buffer.c
linux-core/i915_fence.c
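
Lindent here is the kernel tree's scripts/Lindent pass: a thin wrapper around GNU indent that applies the kernel's K&R settings (8-column tabs, a space after keywords such as "switch" and "while", goto labels re-indented, long lines wrapped at 80 columns). The command below is an assumed reproduction, not something recorded in this commit; the flags are the long-standing scripts/Lindent defaults. Where GNU indent has not been told about a typedef via -T, it treats "drm_buffer_object_t *bo" as an expression and emits "drm_buffer_object_t * bo", which is why pointer-declarator spacing changes throughout the diff.

    # assumed invocation -- scripts/Lindent's usual GNU indent parameters
    indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs \
        linux-core/drm_bo.c linux-core/drm_fence.c linux-core/drm_hashtab.c \
        linux-core/drm_mm.c linux-core/drm_ttm.c linux-core/drm_ttm.h \
        linux-core/i915_buffer.c linux-core/i915_fence.c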

diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index fb90098..e8e8a27 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -67,7 +67,7 @@ static inline uint32_t drm_bo_type_flags(unsigned type)
 static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list,
                                                unsigned type)
 {
-       switch(type) {
+       switch (type) {
        case DRM_BO_MEM_LOCAL:
        case DRM_BO_MEM_TT:
                return list_entry(list, drm_buffer_object_t, lru_ttm);
@@ -80,10 +80,10 @@ static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list,
        return NULL;
 }
 
-static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t *bo,
+static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t * bo,
                                            unsigned type)
 {
-       switch(type) {
+       switch (type) {
        case DRM_BO_MEM_LOCAL:
        case DRM_BO_MEM_TT:
                return bo->node_ttm;
@@ -95,29 +95,38 @@ static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t *bo,
        }
        return NULL;
 }
-               
+
 /*
  * bo locked. dev->struct_mutex locked.
  */
 
-static void drm_bo_add_to_lru(drm_buffer_object_t *buf,
-                             drm_buffer_manager_t *bm)
+static void drm_bo_add_to_lru(drm_buffer_object_t * buf,
+                             drm_buffer_manager_t * bm)
 {
        struct list_head *list;
        unsigned mem_type;
 
        if (buf->flags & DRM_BO_FLAG_MEM_TT) {
                mem_type = DRM_BO_MEM_TT;
-               list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &bm->pinned[mem_type] : &bm->lru[mem_type];
+               list =
+                   (buf->
+                    flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+                   &bm->pinned[mem_type] : &bm->lru[mem_type];
                list_add_tail(&buf->lru_ttm, list);
        } else {
                mem_type = DRM_BO_MEM_LOCAL;
-               list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &bm->pinned[mem_type] : &bm->lru[mem_type];
+               list =
+                   (buf->
+                    flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+                   &bm->pinned[mem_type] : &bm->lru[mem_type];
                list_add_tail(&buf->lru_ttm, list);
        }
        if (buf->flags & DRM_BO_FLAG_MEM_VRAM) {
                mem_type = DRM_BO_MEM_VRAM;
-               list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &bm->pinned[mem_type] : &bm->lru[mem_type];
+               list =
+                   (buf->
+                    flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+                   &bm->pinned[mem_type] : &bm->lru[mem_type];
                list_add_tail(&buf->lru_card, list);
        }
 }
@@ -145,9 +154,8 @@ static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict,
                                schedule();
                        return ret;
                }
-       
-               if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) ||
-                   force_no_move) {
+
+               if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
                        drm_mm_put_block(buf->node_ttm);
                        buf->node_ttm = NULL;
                }
@@ -169,14 +177,13 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
 
        drm_buffer_manager_t *bm = &dev->bm;
 
-       
        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
 
        /*
         * Somone might try to access us through the still active BM lists.
         */
 
-       if (atomic_read(&bo->usage) != 0) 
+       if (atomic_read(&bo->usage) != 0)
                return;
        if (!list_empty(&bo->ddestroy))
                return;
@@ -209,7 +216,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
                /*
                 * This temporarily unlocks struct_mutex. 
                 */
-               
+
                do {
                        ret = drm_unbind_ttm(bo->ttm);
                        if (ret == -EAGAIN) {
@@ -224,7 +231,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
                                  "Bad. Continuing anyway\n");
                }
        }
-               
+
        if (bo->node_ttm) {
                drm_mm_put_block(bo->node_ttm);
                bo->node_ttm = NULL;
@@ -249,8 +256,8 @@ static void drm_bo_delayed_delete(drm_device_t * dev)
        drm_fence_object_t *fence;
 
        mutex_lock(&dev->struct_mutex);
-       if (!bm->initialized) 
-           goto out;
+       if (!bm->initialized)
+               goto out;
 
        list = bm->ddestroy.next;
        list_for_each_safe(list, next, &bm->ddestroy) {
@@ -264,7 +271,7 @@ static void drm_bo_delayed_delete(drm_device_t * dev)
 
                if (atomic_read(&entry->usage) != 0)
                        continue;
-                       
+
                /*
                 * Since we're the only users, No need to take the 
                 * bo->mutex to watch the fence.
@@ -284,10 +291,10 @@ static void drm_bo_delayed_delete(drm_device_t * dev)
                         * drm_bo_destroy_locked temporarily releases the
                         * struct_mutex;
                         */
-                       
-                       nentry = NULL;          
+
+                       nentry = NULL;
                        if (next != &bm->ddestroy) {
-                               nentry = list_entry(next, drm_buffer_object_t, 
+                               nentry = list_entry(next, drm_buffer_object_t,
                                                    ddestroy);
                                atomic_inc(&nentry->usage);
                        }
@@ -296,13 +303,12 @@ static void drm_bo_delayed_delete(drm_device_t * dev)
                        drm_bo_destroy_locked(dev, entry);
                        if (next != &bm->ddestroy)
                                atomic_dec(&nentry->usage);
-               } 
+               }
        }
-  out:
+      out:
        mutex_unlock(&dev->struct_mutex);
 }
 
-
 static void drm_bo_delayed_workqueue(void *data)
 {
        drm_device_t *dev = (drm_device_t *) data;
@@ -403,8 +409,8 @@ int drm_fence_buffer_objects(drm_file_t * priv,
                }
        } else {
                mutex_unlock(&dev->struct_mutex);
-               ret = drm_fence_object_create(dev, fence_type, 
-                                             fence_flags | DRM_FENCE_FLAG_EMIT, 
+               ret = drm_fence_object_create(dev, fence_type,
+                                             fence_flags | DRM_FENCE_FLAG_EMIT,
                                              &fence);
                mutex_lock(&dev->struct_mutex);
                if (ret)
@@ -470,9 +476,9 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
                ret =
                    drm_fence_object_wait(dev, fence, lazy, ignore_signals,
                                          bo->fence_type);
-               if (ret) 
-                   return ret;
-               
+               if (ret)
+                       return ret;
+
                drm_fence_usage_deref_unlocked(dev, fence);
                bo->fence = NULL;
 
@@ -484,7 +490,7 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
  * bo->mutex locked 
  */
 
-static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, 
+static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
                        int no_wait, int force_no_move)
 {
        int ret = 0;
@@ -495,7 +501,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
         * Someone might have modified the buffer before we took the buffer mutex.
         */
 
-       if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) 
+       if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
                goto out;
        if (!(bo->flags & drm_bo_type_flags(mem_type)))
                goto out;
@@ -531,7 +537,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
 
        DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
                        _DRM_BO_FLAG_EVICTED);
- out:
+      out:
        return ret;
 }
 
@@ -539,7 +545,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
  * buf->mutex locked.
  */
 
-int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type, 
+int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
                       int no_wait)
 {
        drm_device_t *dev = buf->dev;
@@ -601,7 +607,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
        drm_ttm_backend_t *be;
        int ret;
 
-       if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) { 
+       if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
                BUG_ON(bo->node_ttm);
                ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
                if (ret)
@@ -653,18 +659,19 @@ static int drm_bo_new_flags(drm_device_t * dev,
         * First adjust the mask to take away nonexistant memory types. 
         */
 
-       for (i=0; i<DRM_BO_MEM_TYPES; ++i) {
+       for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
                if (!bm->use_type[i])
                        new_mask &= ~drm_bo_type_flags(i);
        }
 
-       if ((new_mask & DRM_BO_FLAG_NO_EVICT ) && !DRM_SUSER(DRM_CURPROC)) {
-               DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
-                         "processes\n");
+       if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+               DRM_ERROR
+                   ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
+                    "processes\n");
                return -EPERM;
        }
        if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
-               if (((new_mask & DRM_BO_FLAG_MEM_TT) && 
+               if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
                     !driver->cached[DRM_BO_MEM_TT]) &&
                    ((new_mask & DRM_BO_FLAG_MEM_VRAM)
                     && !driver->cached[DRM_BO_MEM_VRAM])) {
@@ -831,12 +838,12 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo)
        int ret = 0;
 
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-       if (bo->node_card) 
+       if (bo->node_card)
                ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1, 0);
        if (ret)
                return ret;
        if (bo->node_ttm)
-               ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);    
+               ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
        return ret;
 }
 
@@ -1155,18 +1162,18 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
                DRM_ERROR("Driver did not support given buffer permissions\n");
                return ret;
        }
-       
+
        /*
         * Move out if we need to change caching policy.
         */
 
-       if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) && 
+       if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
            !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
                if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
                        DRM_ERROR("Cannot change caching policy of "
                                  "pinned buffer.\n");
                        return -EINVAL;
-               }                       
+               }
                ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
                if (ret) {
                        if (ret != -EAGAIN)
@@ -1182,7 +1189,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
         * release reserved manager regions.
         */
 
-       if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && 
+       if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
            !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
                mutex_lock(&dev->struct_mutex);
                if (bo->node_ttm) {
@@ -1434,10 +1441,10 @@ int drm_buffer_object_create(drm_file_t * priv,
        mutex_unlock(&bo->mutex);
        *buf_obj = bo;
        return 0;
-       
- out_err:
+
+      out_err:
        mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(dev, bo);   
+       drm_bo_usage_deref_unlocked(dev, bo);
        return ret;
 }
 
@@ -1607,11 +1614,10 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
  * dev->struct_sem locked.
  */
 
-static int drm_bo_force_list_clean(drm_device_t *dev,
-                                  struct list_head *head, 
+static int drm_bo_force_list_clean(drm_device_t * dev,
+                                  struct list_head *head,
                                   unsigned mem_type,
-                                  int force_no_move,
-                                  int allow_errors)
+                                  int force_no_move, int allow_errors)
 {
        drm_buffer_manager_t *bm = &dev->bm;
        struct list_head *list, *next, *prev;
@@ -1619,11 +1625,11 @@ static int drm_bo_force_list_clean(drm_device_t *dev,
        int ret;
        int clean;
 
- retry:
+      retry:
        clean = 1;
        list_for_each_safe(list, next, head) {
                prev = list->prev;
-               entry = drm_bo_entry(list, mem_type);                   
+               entry = drm_bo_entry(list, mem_type);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
@@ -1639,10 +1645,10 @@ static int drm_bo_force_list_clean(drm_device_t *dev,
                        /*
                         * Expire the fence.
                         */
-                       
+
                        mutex_unlock(&dev->struct_mutex);
                        if (entry->fence && bm->nice_mode) {
-                               unsigned long _end = jiffies + 3*DRM_HZ;
+                               unsigned long _end = jiffies + 3 * DRM_HZ;
                                do {
                                        ret = drm_bo_wait(entry, 0, 1, 0);
                                        if (ret && allow_errors) {
@@ -1651,7 +1657,7 @@ static int drm_bo_force_list_clean(drm_device_t *dev,
                                                goto out_err;
                                        }
                                } while (ret && !time_after_eq(jiffies, _end));
-                               
+
                                if (entry->fence) {
                                        bm->nice_mode = 0;
                                        DRM_ERROR("Detected GPU hang or "
@@ -1660,14 +1666,17 @@ static int drm_bo_force_list_clean(drm_device_t *dev,
                                }
                        }
                        if (entry->fence) {
-                               drm_fence_usage_deref_unlocked(dev, entry->fence);
+                               drm_fence_usage_deref_unlocked(dev,
+                                                              entry->fence);
                                entry->fence = NULL;
                        }
 
-                       DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED, 0);
+                       DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
+                                    0);
 
                        if (force_no_move) {
-                               DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE, 0);
+                               DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
+                                            0);
                        }
                        if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
                                DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
@@ -1690,12 +1699,12 @@ static int drm_bo_force_list_clean(drm_device_t *dev,
                drm_bo_usage_deref_locked(dev, entry);
                if (prev != list->prev || next != list->next) {
                        goto retry;
-               }               
+               }
        }
        if (!clean)
                goto retry;
        return 0;
- out_err:
+      out_err:
        mutex_unlock(&entry->mutex);
        drm_bo_usage_deref_unlocked(dev, entry);
        mutex_lock(&dev->struct_mutex);
@@ -1715,7 +1724,7 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
        if (!bm->has_type[mem_type]) {
                DRM_ERROR("Trying to take down uninitialized "
                          "memory manager type\n");
-               return ret;
+               return ret;
        }
        bm->use_type[mem_type] = 0;
        bm->has_type[mem_type] = 0;
@@ -1733,10 +1742,12 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
                 * Throw out evicted no-move buffers.
                 */
 
-               drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL], 
+               drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
                                        mem_type, 1, 0);
-               drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1, 0);
-               drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1, 0);
+               drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
+                                       0);
+               drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
+                                       0);
 
                if (drm_mm_clean(&bm->manager[mem_type])) {
                        drm_mm_takedown(&bm->manager[mem_type]);
@@ -1748,32 +1759,30 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
        return ret;
 }
 
-static int drm_bo_lock_mm(drm_device_t *dev, unsigned mem_type)
+static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
 {
        int ret;
        drm_buffer_manager_t *bm = &dev->bm;
 
        if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
-               DRM_ERROR("Illegal memory manager memory type %u,\n", 
-                         mem_type);
+               DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type);
                return -EINVAL;
        }
-       
-       ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);  
-       if (ret)
+
+       ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
+       if (ret)
                return ret;
-       ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);         
+       ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
        if (ret)
                return ret;
-       ret = drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);  
+       ret =
+           drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
        return ret;
 }
 
-
-static int drm_bo_init_mm(drm_device_t *dev,
+static int drm_bo_init_mm(drm_device_t * dev,
                          unsigned type,
-                         unsigned long p_offset,
-                         unsigned long p_size)
+                         unsigned long p_offset, unsigned long p_size)
 {
        drm_buffer_manager_t *bm = &dev->bm;
        int ret = -EINVAL;
@@ -1794,7 +1803,7 @@ static int drm_bo_init_mm(drm_device_t *dev,
                        DRM_ERROR("Zero size memory manager type %d\n", type);
                        return ret;
                }
-               ret = drm_mm_init(&bm->manager[type],p_offset, p_size);
+               ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
                if (ret)
                        return ret;
        }
@@ -1807,8 +1816,7 @@ static int drm_bo_init_mm(drm_device_t *dev,
        return 0;
 }
 
-
-int drm_bo_driver_finish(drm_device_t *dev)
+int drm_bo_driver_finish(drm_device_t * dev)
 {
        drm_buffer_manager_t *bm = &dev->bm;
        int ret = 0;
@@ -1817,10 +1825,10 @@ int drm_bo_driver_finish(drm_device_t *dev)
        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
 
-       if (!bm->initialized) 
+       if (!bm->initialized)
                goto out;
 
-       while(i--) {
+       while (i--) {
                if (bm->has_type[i]) {
                        bm->use_type[i] = 0;
                        if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
@@ -1840,25 +1848,24 @@ int drm_bo_driver_finish(drm_device_t *dev)
                flush_scheduled_work();
        }
        mutex_lock(&dev->struct_mutex);
- out:
+      out:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        return ret;
 }
-                               
 
-int drm_bo_driver_init(drm_device_t *dev)
+int drm_bo_driver_init(drm_device_t * dev)
 {
        drm_bo_driver_t *driver = dev->driver->bo_driver;
        drm_buffer_manager_t *bm = &dev->bm;
        int ret = -EINVAL;
        struct sysinfo si;
-       
+
        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
        if (!driver)
                goto out_unlock;
-       
+
        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
@@ -1876,14 +1883,14 @@ int drm_bo_driver_init(drm_device_t *dev)
        si_meminfo(&si);
        bm->max_pages = si.totalram >> 1;
        INIT_LIST_HEAD(&bm->unfenced);
-       INIT_LIST_HEAD(&bm->ddestroy);  
- out_unlock:
+       INIT_LIST_HEAD(&bm->ddestroy);
+      out_unlock:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        return ret;
 }
-EXPORT_SYMBOL(drm_bo_driver_init);     
 
+EXPORT_SYMBOL(drm_bo_driver_init);
 
 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
 {
@@ -1911,15 +1918,15 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
                        break;
                }
                if (arg.req.mem_type == 0) {
-                       DRM_ERROR("System memory buffers already initialized.\n");
+                       DRM_ERROR
+                           ("System memory buffers already initialized.\n");
                        break;
                }
-               ret = drm_bo_init_mm(dev, arg.req.mem_type, 
-                                    arg.req.p_offset,
-                                    arg.req.p_size);
+               ret = drm_bo_init_mm(dev, arg.req.mem_type,
+                                    arg.req.p_offset, arg.req.p_size);
                break;
        case mm_takedown:
-               LOCK_TEST_WITH_RETURN(dev, filp);
+               LOCK_TEST_WITH_RETURN(dev, filp);
                mutex_lock(&dev->bm.init_mutex);
                mutex_lock(&dev->struct_mutex);
                ret = -EINVAL;
@@ -1937,36 +1944,38 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
                                  "Delaying takedown\n", arg.req.mem_type);
                }
                break;
-       case mm_set_max_pages: {
-               struct sysinfo si;
-               mutex_lock(&dev->bm.init_mutex);
-               mutex_lock(&dev->struct_mutex);
-               if (arg.req.p_size < bm->cur_pages) {
-                       DRM_ERROR("Cannot currently decrease max number of "
-                                 "locked pages below the number currently "
-                                 "locked.\n");
-                       ret = -EINVAL;
-                       break;
-               }
-               si_meminfo(&si);
-               if (arg.req.p_size > si.totalram) {
-                       DRM_ERROR("Cannot set max number of locked pages "
-                                 "to %lu since the total number of RAM pages "
-                                 "is %lu.\n", (unsigned long) arg.req.p_size, 
-                                 (unsigned long) si.totalram);
-                       ret = -EINVAL;
-                       break;
+       case mm_set_max_pages:{
+                       struct sysinfo si;
+                       mutex_lock(&dev->bm.init_mutex);
+                       mutex_lock(&dev->struct_mutex);
+                       if (arg.req.p_size < bm->cur_pages) {
+                               DRM_ERROR
+                                   ("Cannot currently decrease max number of "
+                                    "locked pages below the number currently "
+                                    "locked.\n");
+                               ret = -EINVAL;
+                               break;
+                       }
+                       si_meminfo(&si);
+                       if (arg.req.p_size > si.totalram) {
+                               DRM_ERROR
+                                   ("Cannot set max number of locked pages "
+                                    "to %lu since the total number of RAM pages "
+                                    "is %lu.\n", (unsigned long)arg.req.p_size,
+                                    (unsigned long)si.totalram);
+                               ret = -EINVAL;
+                               break;
+                       }
+                       bm->max_pages = arg.req.p_size;
                }
-               bm->max_pages = arg.req.p_size;
-       }
        case mm_lock:
-               LOCK_TEST_WITH_RETURN(dev, filp);
+               LOCK_TEST_WITH_RETURN(dev, filp);
                mutex_lock(&dev->bm.init_mutex);
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_lock_mm(dev, arg.req.mem_type);
                break;
        case mm_unlock:
-               LOCK_TEST_WITH_RETURN(dev, filp);
+               LOCK_TEST_WITH_RETURN(dev, filp);
                mutex_lock(&dev->bm.init_mutex);
                mutex_lock(&dev->struct_mutex);
                ret = 0;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index c9a2a06..aa38204 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -31,7 +31,6 @@
 
 #include "drmP.h"
 
-
 /*
  * Typically called by the IRQ handler.
  */
@@ -90,7 +89,7 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
                }
 
        }
-               
+
        if (wake) {
                DRM_WAKEUP(&fm->fence_queue);
        }
@@ -132,8 +131,8 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev,
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&fence->usage) == 0) {
                        drm_fence_unring(dev, &fence->ring);
-                       atomic_dec(&fm->count);
-                       drm_ctl_cache_free(drm_cache.fence_object, 
+                       atomic_dec(&fm->count);
+                       drm_ctl_cache_free(drm_cache.fence_object,
                                           sizeof(*fence), fence);
                }
                mutex_unlock(&dev->struct_mutex);
@@ -150,7 +149,7 @@ static void drm_fence_object_destroy(drm_file_t * priv,
        drm_fence_usage_deref_locked(dev, fence);
 }
 
-static int fence_signaled(drm_device_t * dev, volatile 
+static int fence_signaled(drm_device_t * dev, volatile
                          drm_fence_object_t * fence,
                          uint32_t mask, int poke_flush)
 {
@@ -205,15 +204,14 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm,
        }
 }
 
-int drm_fence_object_signaled(volatile drm_fence_object_t * fence, 
+int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
                              uint32_t type)
 {
        return ((fence->signaled & type) == type);
 }
 
 int drm_fence_object_flush(drm_device_t * dev,
-                          volatile drm_fence_object_t * fence, 
-                          uint32_t type)
+                          volatile drm_fence_object_t * fence, uint32_t type)
 {
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
@@ -221,7 +219,7 @@ int drm_fence_object_flush(drm_device_t * dev,
 
        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type, "
-                          "0x%x, 0x%x\n", type, fence->type);
+                         "0x%x, 0x%x\n", type, fence->type);
                return -EINVAL;
        }
 
@@ -248,7 +246,6 @@ int drm_fence_object_flush(drm_device_t * dev,
  * wrapped around and reused.
  */
 
-
 void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
 {
        drm_fence_manager_t *fm = &dev->fm;
@@ -279,7 +276,7 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
 
 EXPORT_SYMBOL(drm_fence_flush_old);
 
-int drm_fence_object_wait(drm_device_t * dev, 
+int drm_fence_object_wait(drm_device_t * dev,
                          volatile drm_fence_object_t * fence,
                          int lazy, int ignore_signals, uint32_t mask)
 {
@@ -328,8 +325,8 @@ int drm_fence_object_wait(drm_device_t * dev,
 
                do {
                        DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
-                                   fence_signaled(dev, fence, DRM_FENCE_TYPE_EXE,
-                                                  1));
+                                   fence_signaled(dev, fence,
+                                                  DRM_FENCE_TYPE_EXE, 1));
                        if (time_after_eq(jiffies, _end))
                                break;
                } while (ret == -EINTR && ignore_signals);
@@ -347,9 +344,9 @@ int drm_fence_object_wait(drm_device_t * dev,
         */
 #if 1
        if (!ignore_signals)
-               return -EAGAIN;
+               return -EAGAIN;
 #endif
-       do { 
+       do {
                schedule();
                signaled = fence_signaled(dev, fence, mask, 1);
        } while (!signaled && !time_after_eq(jiffies, _end));
@@ -387,7 +384,7 @@ int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
        return 0;
 }
 
-static int drm_fence_object_init(drm_device_t * dev, uint32_t type, 
+static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
                                 uint32_t fence_flags,
                                 drm_fence_object_t * fence)
 {
@@ -414,7 +411,6 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
        return ret;
 }
 
-
 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
                              int shareable)
 {
@@ -441,7 +437,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
        int ret;
        drm_fence_manager_t *fm = &dev->fm;
 
-       fence = drm_ctl_cache_alloc(drm_cache.fence_object, 
+       fence = drm_ctl_cache_alloc(drm_cache.fence_object,
                                    sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
@@ -472,7 +468,7 @@ void drm_fence_manager_init(drm_device_t * dev)
        fm->initialized = 0;
        if (fed) {
                fm->initialized = 1;
-               atomic_set(&fm->count,0);
+               atomic_set(&fm->count, 0);
                for (i = 0; i < fed->no_types; ++i) {
                        fm->fence_types[i] = &fm->ring;
                }
@@ -523,9 +519,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
        case drm_fence_create:
                if (arg.flags & DRM_FENCE_FLAG_EMIT)
                        LOCK_TEST_WITH_RETURN(dev, filp);
-               ret = drm_fence_object_create(dev, arg.type,
-                                             arg.flags,
-                                             &fence);
+               ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
@@ -596,7 +590,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
                        return -EINVAL;
                }
                LOCK_TEST_WITH_RETURN(dev, filp);
-               ret = drm_fence_buffer_objects(priv, NULL, arg.flags, 
+               ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
                                               NULL, &fence);
                if (ret)
                        return ret;
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index 3a2aa80..6f17e11 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -36,7 +36,7 @@
 #include "drm_hashtab.h"
 #include <linux/hash.h>
 
-int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
+int drm_ht_create(drm_open_hash_t * ht, unsigned int order)
 {
        unsigned int i;
 
@@ -46,24 +46,24 @@ int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
        ht->table = NULL;
        ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
        if (!ht->use_vmalloc) {
-               ht->table = drm_calloc(ht->size, sizeof(*ht->table), 
+               ht->table = drm_calloc(ht->size, sizeof(*ht->table),
                                       DRM_MEM_HASHTAB);
-       } 
+       }
        if (!ht->table) {
                ht->use_vmalloc = 1;
-               ht->table = vmalloc(ht->size*sizeof(*ht->table));       
-       } 
+               ht->table = vmalloc(ht->size * sizeof(*ht->table));
+       }
        if (!ht->table) {
                DRM_ERROR("Out of memory for hash table\n");
                return -ENOMEM;
        }
-       for (i=0; i< ht->size; ++i) {
+       for (i = 0; i < ht->size; ++i) {
                INIT_HLIST_HEAD(&ht->table[i]);
        }
        return 0;
 }
 
-void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
+void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)
 {
        drm_hash_item_t *entry;
        struct hlist_head *h_list;
@@ -80,7 +80,7 @@ void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
        }
 }
 
-static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht, 
+static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
                                          unsigned long key)
 {
        drm_hash_item_t *entry;
@@ -100,8 +100,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
        return NULL;
 }
 
-
-int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
 {
        drm_hash_item_t *entry;
        struct hlist_head *h_list;
@@ -132,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
  * Just insert an item and return any "bits" bit key that hasn't been 
  * used before.
  */
-int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,
                              unsigned long seed, int bits, int shift,
                              unsigned long add)
 {
@@ -147,7 +146,7 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
                ret = drm_ht_insert_item(ht, item);
                if (ret)
                        unshifted_key = (unshifted_key + 1) & mask;
-       } while(ret && (unshifted_key != first));
+       } while (ret && (unshifted_key != first));
 
        if (ret) {
                DRM_ERROR("Available key bit space exhausted\n");
@@ -156,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
        return 0;
 }
 
-int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
-                    drm_hash_item_t **item)
+int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key,
+                    drm_hash_item_t ** item)
 {
        struct hlist_node *list;
 
@@ -169,7 +168,7 @@ int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
        return 0;
 }
 
-int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
+int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)
 {
        struct hlist_node *list;
 
@@ -182,22 +181,21 @@ int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
        return -EINVAL;
 }
 
-int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item)
 {
        hlist_del_init(&item->head);
        ht->fill--;
        return 0;
 }
 
-void drm_ht_remove(drm_open_hash_t *ht)
+void drm_ht_remove(drm_open_hash_t * ht)
 {
        if (ht->table) {
-               if (ht->use_vmalloc) 
+               if (ht->use_vmalloc)
                        vfree(ht->table);
                else
-                       drm_free(ht->table, ht->size*sizeof(*ht->table), 
+                       drm_free(ht->table, ht->size * sizeof(*ht->table),
                                 DRM_MEM_HASHTAB);
                ht->table = NULL;
        }
 }
-
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index 6ab13af..4af33bd 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -59,9 +59,9 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
                return parent;
        } else {
 
-               child = (drm_mm_node_t *) 
-                       drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), 
-                                           GFP_KERNEL);
+               child = (drm_mm_node_t *)
+                   drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+                                       GFP_KERNEL);
                if (!child)
                        return NULL;
 
@@ -111,8 +111,8 @@ void drm_mm_put_block(drm_mm_node_t * cur)
                                prev_node->size += next_node->size;
                                list_del(&next_node->ml_entry);
                                list_del(&next_node->fl_entry);
-                               drm_ctl_cache_free(drm_cache.mm, 
-                                                  sizeof(*next_node), 
+                               drm_ctl_cache_free(drm_cache.mm,
+                                                  sizeof(*next_node),
                                                   next_node);
                        } else {
                                next_node->size += cur->size;
@@ -161,9 +161,9 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
        return best;
 }
 
-int drm_mm_clean(drm_mm_t *mm) 
+int drm_mm_clean(drm_mm_t * mm)
 {
-        struct list_head *head = &mm->root_node.ml_entry;
+       struct list_head *head = &mm->root_node.ml_entry;
 
        return (head->next->next == head);
 }
@@ -175,9 +175,8 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
        INIT_LIST_HEAD(&mm->root_node.ml_entry);
        INIT_LIST_HEAD(&mm->root_node.fl_entry);
 
-
-       child = (drm_mm_node_t *) 
-               drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), GFP_KERNEL);
+       child = (drm_mm_node_t *)
+           drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), GFP_KERNEL);
 
        if (!child)
                return -ENOMEM;
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 3e66319..599589f 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -32,7 +32,6 @@
  * Use kmalloc if possible. Otherwise fall back to vmalloc.
  */
 
-
 static void *ttm_alloc(unsigned long size, int type)
 {
        void *ret = NULL;
@@ -53,15 +52,15 @@ static void *ttm_alloc(unsigned long size, int type)
 
 static void ttm_free(void *pointer, unsigned long size, int type)
 {
-  
-       if ((unsigned long) pointer >= VMALLOC_START &&
-           (unsigned long) pointer <= VMALLOC_END) {
+
+       if ((unsigned long)pointer >= VMALLOC_START &&
+           (unsigned long)pointer <= VMALLOC_END) {
                vfree(pointer);
        } else {
                drm_free(pointer, size, type);
        }
        drm_free_memctl(size);
-}              
+}
 
 /*
  * Unmap all vma pages from vmas mapping this ttm.
@@ -155,7 +154,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 
        if (ttm->pages) {
                drm_buffer_manager_t *bm = &ttm->dev->bm;
-               if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) 
+               if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
                        drm_set_caching(ttm, 0);
 
                for (i = 0; i < ttm->num_pages; ++i) {
@@ -184,7 +183,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
                                --bm->cur_pages;
                        }
                }
-               ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
+               ttm_free(ttm->pages, ttm->num_pages * sizeof(*ttm->pages),
                         DRM_MEM_TTM);
                ttm->pages = NULL;
        }
@@ -193,20 +192,19 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
        return 0;
 }
 
-static int drm_ttm_populate(drm_ttm_t *ttm)
+static int drm_ttm_populate(drm_ttm_t * ttm)
 {
        struct page *page;
        unsigned long i;
        drm_buffer_manager_t *bm;
        drm_ttm_backend_t *be;
 
-
-       if (ttm->state != ttm_unpopulated) 
+       if (ttm->state != ttm_unpopulated)
                return 0;
-       
+
        bm = &ttm->dev->bm;
        be = ttm->be;
-       for (i=0; i<ttm->num_pages; ++i) {
+       for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (!page) {
                        if (drm_alloc_memctl(PAGE_SIZE)) {
@@ -229,9 +227,7 @@ static int drm_ttm_populate(drm_ttm_t *ttm)
        be->populate(be, ttm->num_pages, ttm->pages);
        ttm->state = ttm_unbound;
        return 0;
-}              
-              
-
+}
 
 /*
  * Initialize a ttm.
@@ -266,7 +262,7 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
         * Account also for AGP module memory usage.
         */
 
-       ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages), 
+       ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
                               DRM_MEM_TTM);
        if (!ttm->pages) {
                drm_destroy_ttm(ttm);
@@ -321,13 +317,12 @@ void drm_fixup_ttm_caching(drm_ttm_t * ttm)
                ttm->state = ttm_unbound;
        }
 }
-               
 
 int drm_unbind_ttm(drm_ttm_t * ttm)
 {
        int ret = 0;
 
-       if (ttm->state == ttm_bound) 
+       if (ttm->state == ttm_bound)
                ret = drm_evict_ttm(ttm);
 
        if (ret)
@@ -337,8 +332,7 @@ int drm_unbind_ttm(drm_ttm_t * ttm)
        return 0;
 }
 
-int drm_bind_ttm(drm_ttm_t * ttm, int cached,
-                unsigned long aper_offset)
+int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
 {
 
        int ret = 0;
@@ -350,7 +344,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached,
                return 0;
 
        be = ttm->be;
-       
+
        ret = drm_ttm_populate(ttm);
        if (ret)
                return ret;
@@ -361,7 +355,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached,
 
                drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
        }
-#ifdef DRM_ODD_MM_COMPAT 
+#ifdef DRM_ODD_MM_COMPAT
        else if (ttm->state == ttm_evicted && !cached) {
                ret = drm_ttm_lock_mm(ttm);
                if (ret)
@@ -378,18 +372,17 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached,
                return ret;
        }
 
-                       
        ttm->aper_offset = aper_offset;
        ttm->state = ttm_bound;
 
 #ifdef DRM_ODD_MM_COMPAT
        if (be->needs_ub_cache_adjust(be)) {
                ret = drm_ttm_remap_bound(ttm);
-               if (ret) 
+               if (ret)
                        return ret;
        }
 #endif
-                       
+
        return 0;
 }
 
@@ -448,8 +441,7 @@ void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
  */
 
 int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
-                         uint32_t flags, 
-                         drm_ttm_object_t ** ttm_object)
+                         uint32_t flags, drm_ttm_object_t ** ttm_object)
 {
        drm_ttm_object_t *object;
        drm_map_list_t *list;
@@ -476,21 +468,20 @@ int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
                return -ENOMEM;
        }
 
-       map->offset = (unsigned long) ttm;
+       map->offset = (unsigned long)ttm;
        map->type = _DRM_TTM;
        map->flags = _DRM_REMOVABLE;
        map->size = ttm->num_pages * PAGE_SIZE;
        map->handle = (void *)object;
 
        list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
-                                                   ttm->num_pages,
-                                                   0,0);
+                                                   ttm->num_pages, 0, 0);
        if (!list->file_offset_node) {
                drm_ttm_object_remove(dev, object);
                return -ENOMEM;
        }
        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-                                                 ttm->num_pages,0);
+                                                 ttm->num_pages, 0);
 
        list->hash.key = list->file_offset_node->start;
 
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index e5501d9..11a1375 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -52,12 +52,12 @@ typedef struct drm_ttm_backend {
        unsigned long aperture_base;
        void *private;
        uint32_t flags;
-        uint32_t drm_map_type;
+       uint32_t drm_map_type;
        int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
        int (*populate) (struct drm_ttm_backend * backend,
                         unsigned long num_pages, struct page ** pages);
        void (*clear) (struct drm_ttm_backend * backend);
-       int (*bind) (struct drm_ttm_backend * backend, 
+       int (*bind) (struct drm_ttm_backend * backend,
                     unsigned long offset, int cached);
        int (*unbind) (struct drm_ttm_backend * backend);
        void (*destroy) (struct drm_ttm_backend * backend);
@@ -68,11 +68,11 @@ typedef struct drm_ttm {
        uint32_t page_flags;
        unsigned long num_pages;
        unsigned long aper_offset;
-        atomic_t vma_count;
+       atomic_t vma_count;
        struct drm_device *dev;
        int destroy;
-        uint32_t mapping_offset;
-        drm_ttm_backend_t *be;
+       uint32_t mapping_offset;
+       drm_ttm_backend_t *be;
        enum {
                ttm_bound,
                ttm_evicted,
@@ -80,8 +80,8 @@ typedef struct drm_ttm {
                ttm_unpopulated,
        } state;
 #ifdef DRM_ODD_MM_COMPAT
-       struct list_head vma_list;
-       struct list_head p_mm_list;
+       struct list_head vma_list;
+       struct list_head p_mm_list;
 #endif
 
 } drm_ttm_t;
@@ -93,7 +93,7 @@ typedef struct drm_ttm_object {
 } drm_ttm_object_t;
 
 extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
-                                uint32_t flags, 
+                                uint32_t flags,
                                 drm_ttm_object_t ** ttm_object);
 extern void drm_ttm_object_deref_locked(struct drm_device *dev,
                                        drm_ttm_object_t * to);
@@ -102,8 +102,7 @@ extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
 extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
                                               uint32_t handle,
                                               int check_owner);
-extern int drm_bind_ttm(drm_ttm_t * ttm, int cached,
-                       unsigned long aper_offset);
+extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
 
 extern int drm_unbind_ttm(drm_ttm_t * ttm);
 
@@ -112,8 +111,7 @@ extern int drm_unbind_ttm(drm_ttm_t * ttm);
  */
 
 extern int drm_evict_ttm(drm_ttm_t * ttm);
-extern void drm_fixup_ttm_caching(drm_ttm_t *ttm);
-
+extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
 
 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, 
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index 8a3d7bf..729ba4b 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
 #define INTEL_AGP_MEM_USER 3
 #define INTEL_AGP_MEM_UCACHED 4
 
-drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev)
+drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
 {
-       return drm_agp_init_ttm(dev, NULL, INTEL_AGP_MEM_USER, INTEL_AGP_MEM_UCACHED,
-                               INTEL_AGP_MEM_USER);
+       return drm_agp_init_ttm(dev, NULL, INTEL_AGP_MEM_USER,
+                               INTEL_AGP_MEM_UCACHED, INTEL_AGP_MEM_USER);
 }
 
-int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type)
+int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
 {
        *class = 0;
-       if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) 
+       if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
                *type = 3;
        else
                *type = 1;
        return 0;
 }
 
-int i915_invalidate_caches(drm_device_t *dev, uint32_t flags)
+int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
 {
        /*
         * FIXME: Only emit once per batchbuffer submission.
@@ -65,6 +65,5 @@ int i915_invalidate_caches(drm_device_t *dev, uint32_t flags)
        if (flags & DRM_BO_FLAG_EXE)
                flush_cmd |= MI_EXE_FLUSH;
 
-       
        return i915_emit_mi_flush(dev, flush_cmd);
 }
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 49dc254..fc8ab76 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -62,7 +62,7 @@ static void i915_perform_flush(drm_device_t * dev)
                diff = sequence - fm->last_exe_flush;
                if (diff < driver->wrap_diff && diff != 0) {
                        drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
-               } 
+               }
 
                diff = sequence - fm->exe_flush_sequence;
                if (diff < driver->wrap_diff) {
@@ -85,7 +85,7 @@ static void i915_perform_flush(drm_device_t * dev)
                        flush_sequence = dev_priv->flush_sequence;
                        dev_priv->flush_pending = 0;
                        drm_fence_handler(dev, flush_sequence, flush_flags);
-               } 
+               }
        }
 
        if (fm->pending_flush && !dev_priv->flush_pending) {
@@ -105,7 +105,7 @@ static void i915_perform_flush(drm_device_t * dev)
                        flush_sequence = dev_priv->flush_sequence;
                        dev_priv->flush_pending = 0;
                        drm_fence_handler(dev, flush_sequence, flush_flags);
-               } 
+               }
        }
 
 }
@@ -121,15 +121,15 @@ void i915_poke_flush(drm_device_t * dev)
 }
 
 int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
-                            uint32_t * sequence, uint32_t *native_type)
+                            uint32_t * sequence, uint32_t * native_type)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        i915_emit_irq(dev);
        *sequence = (uint32_t) dev_priv->counter;
-       *native_type = DRM_FENCE_TYPE_EXE;  
-       if (flags & DRM_I915_FENCE_FLAG_FLUSHED) 
+       *native_type = DRM_FENCE_TYPE_EXE;
+       if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
                *native_type |= DRM_I915_FENCE_TYPE_RW;
-              
+
        return 0;
 }
 
@@ -141,4 +141,3 @@ void i915_fence_handler(drm_device_t * dev)
        i915_perform_flush(dev);
        write_unlock(&fm->lock);
 }
-