drm: remove unused define DRM_HAS_TTM
[profile/ivi/libdrm.git] / linux-core / drm_objects.h
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_OBJECTS_H
#define _DRM_OBJECTS_H

struct drm_device;

/***************************************************
 * User space objects. (drm_object.c)
 */

#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)

typedef enum {
        drm_fence_type,
        drm_buffer_type,
        drm_ttm_type
            /*
             * Add other user space object types here.
             */
} drm_object_type_t;

/*
 * A user object is a structure that helps the drm give out user handles
 * to kernel internal objects and to keep track of these objects so that
 * they can be destroyed, for example when the user space process exits.
 * Designed to be accessible using a user space 32-bit handle.
 */

typedef struct drm_user_object {
        drm_hash_item_t hash;
        struct list_head list;
        drm_object_type_t type;
        atomic_t refcount;
        int shareable;
        drm_file_t *owner;
        void (*ref_struct_locked) (drm_file_t * priv,
                                   struct drm_user_object * obj,
                                   drm_ref_t ref_action);
        void (*unref) (drm_file_t * priv, struct drm_user_object * obj,
                       drm_ref_t unref_action);
        void (*remove) (drm_file_t * priv, struct drm_user_object * obj);
} drm_user_object_t;

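/*
 * Example (illustrative sketch only, not part of this header): a driver
 * typically embeds a drm_user_object_t in its own object and uses
 * drm_user_object_entry() to get from the base pointer handed out by the
 * lookup functions back to the containing structure. The structure and
 * function below are hypothetical; a function like my_driver_object_remove()
 * could serve as the object's remove() hook.
 *
 *        struct my_driver_object {
 *                drm_user_object_t base;
 *                void *driver_private;
 *        };
 *
 *        static void my_driver_object_remove(drm_file_t *priv,
 *                                            struct drm_user_object *obj)
 *        {
 *                struct my_driver_object *mobj =
 *                    drm_user_object_entry(obj, struct my_driver_object, base);
 *
 *                kfree(mobj);
 *        }
 */
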
/*
 * A ref object is a structure used to keep track of references to user
 * objects, so that the references can be destroyed, for example when the
 * user space process exits. Designed to be accessible using a pointer to
 * the _user_ object.
 */

typedef struct drm_ref_object {
        drm_hash_item_t hash;
        struct list_head list;
        atomic_t refcount;
        drm_ref_t unref_action;
} drm_ref_object_t;

/**
 * Must be called with the struct_mutex held.
 */

extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
                               int shareable);
/**
 * Must be called with the struct_mutex held.
 */

extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
                                                 uint32_t key);

/*
 * Must be called with the struct_mutex held.
 * If "item" has been obtained by a call to drm_lookup_user_object, you may not
 * release the struct_mutex before calling drm_remove_user_object.
 * This function may temporarily release the struct_mutex.
 */

extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item);

/*
 * Must be called with the struct_mutex held. May temporarily release it.
 */

extern int drm_add_ref_object(drm_file_t * priv,
                              drm_user_object_t * referenced_object,
                              drm_ref_t ref_action);

/*
 * Must be called with the struct_mutex held.
 */

drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
                                        drm_user_object_t * referenced_object,
                                        drm_ref_t ref_action);
/*
 * Must be called with the struct_mutex held.
 * If "item" has been obtained by a call to drm_lookup_ref_object, you may not
 * release the struct_mutex before calling drm_remove_ref_object.
 * This function may temporarily release the struct_mutex.
 */

extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item);
extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
                               drm_object_type_t type,
                               drm_user_object_t ** object);
extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
                                 drm_object_type_t type);

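/*
 * Example (illustrative sketch only) of the locking protocol above: a new
 * object is registered while the struct_mutex is held; the last argument to
 * drm_add_user_object() makes the object shareable between clients, and the
 * object's hash key is what typically gets handed back to user space as the
 * 32-bit handle. "dev", "priv" and "mobj" (a driver structure embedding a
 * drm_user_object_t, as in the earlier sketch) are assumed to come from the
 * caller.
 *
 *        int ret;
 *
 *        mutex_lock(&dev->struct_mutex);
 *        ret = drm_add_user_object(priv, &mobj->base, 1);
 *        mutex_unlock(&dev->struct_mutex);
 *        if (ret)
 *                return ret;
 */
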
/***************************************************
 * Fence objects. (drm_fence.c)
 */

typedef struct drm_fence_object {
        drm_user_object_t base;
        atomic_t usage;

        /*
         * The below three fields are protected by the fence manager spinlock.
         */

        struct list_head ring;
        int class;
        uint32_t native_type;
        uint32_t type;
        uint32_t signaled;
        uint32_t sequence;
        uint32_t flush_mask;
        uint32_t submitted_flush;
} drm_fence_object_t;

#define _DRM_FENCE_CLASSES 8
#define _DRM_FENCE_TYPE_EXE 0x00

typedef struct drm_fence_class_manager {
        struct list_head ring;
        uint32_t pending_flush;
        wait_queue_head_t fence_queue;
        int pending_exe_flush;
        uint32_t last_exe_flush;
        uint32_t exe_flush_sequence;
} drm_fence_class_manager_t;

typedef struct drm_fence_manager {
        int initialized;
        rwlock_t lock;
        drm_fence_class_manager_t class[_DRM_FENCE_CLASSES];
        uint32_t num_classes;
        atomic_t count;
} drm_fence_manager_t;

typedef struct drm_fence_driver {
        uint32_t num_classes;
        uint32_t wrap_diff;
        uint32_t flush_diff;
        uint32_t sequence_mask;
        int lazy_capable;
        int (*has_irq) (struct drm_device * dev, uint32_t class,
                        uint32_t flags);
        int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
                     uint32_t * breadcrumb, uint32_t * native_type);
        void (*poke_flush) (struct drm_device * dev, uint32_t class);
} drm_fence_driver_t;

extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
                              uint32_t sequence, uint32_t type);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
                                uint32_t sequence);
extern int drm_fence_object_flush(struct drm_device *dev,
                                  drm_fence_object_t * fence, uint32_t type);
extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
extern void drm_fence_usage_deref_locked(struct drm_device *dev,
                                         drm_fence_object_t * fence);
extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
                                           drm_fence_object_t * fence);
extern int drm_fence_object_wait(struct drm_device *dev,
                                 drm_fence_object_t * fence,
                                 int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
                                   uint32_t fence_flags, uint32_t class,
                                   drm_fence_object_t ** c_fence);
extern int drm_fence_add_user_object(drm_file_t * priv,
                                     drm_fence_object_t * fence, int shareable);
extern int drm_fence_ioctl(DRM_IOCTL_ARGS);

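/*
 * Example (illustrative sketch only) of creating and waiting on a fence from
 * driver code, following the declarations above. "dev" is the device, while
 * "fence_class", "fence_type" and "fence_flags" are driver- and
 * situation-specific values assumed to come from the caller; error handling
 * is abbreviated. Passing lazy = 1 requests an interrupt-driven wait, and
 * ignore_signals = 0 lets the wait be interrupted by signals.
 *
 *        drm_fence_object_t *fence;
 *        int ret;
 *
 *        ret = drm_fence_object_create(dev, fence_type, fence_flags,
 *                                      fence_class, &fence);
 *        if (ret)
 *                return ret;
 *
 *        if (!drm_fence_object_signaled(fence, fence_type))
 *                ret = drm_fence_object_wait(dev, fence, 1, 0, fence_type);
 *
 *        drm_fence_usage_deref_unlocked(dev, fence);
 */
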
/**************************************************
 * TTMs
 */

/*
 * The ttm backend GTT interface (in our case AGP). Any similar type of
 * device (PCIE?) need only implement these functions to be usable with
 * the TTM interface. The AGP backend implementation lives in
 * drm_agpsupport.c and basically maps these calls to the available
 * functions in agpgart. Each drm device driver gets an additional
 * function pointer that creates these types, so that the device can
 * choose the correct aperture (multiple AGP apertures, etc.). Most
 * device drivers will let this point to the standard AGP implementation.
 */

#define DRM_BE_FLAG_NEEDS_FREE     0x00000001
#define DRM_BE_FLAG_BOUND_CACHED   0x00000002

struct drm_ttm_backend;
typedef struct drm_ttm_backend_func {
        int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
        int (*populate) (struct drm_ttm_backend * backend,
                         unsigned long num_pages, struct page ** pages);
        void (*clear) (struct drm_ttm_backend * backend);
        int (*bind) (struct drm_ttm_backend * backend,
                     unsigned long offset, int cached);
        int (*unbind) (struct drm_ttm_backend * backend);
        void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_func_t;


typedef struct drm_ttm_backend {
        uint32_t flags;
        int mem_type;
        drm_ttm_backend_func_t *func;
} drm_ttm_backend_t;

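/*
 * Example (illustrative sketch only) of a driver-provided backend for a
 * device that does not use the stock AGP implementation. All "my_*" names
 * are hypothetical; the callbacks would program the device's own GTT-like
 * page table. DRM_BO_MEM_TT is assumed here to be the memory type used for
 * aperture-bound memory.
 *
 *        struct my_ttm_backend {
 *                drm_ttm_backend_t base;
 *                struct my_gtt *gtt;
 *        };
 *
 *        static drm_ttm_backend_func_t my_ttm_backend_func = {
 *                .needs_ub_cache_adjust = my_needs_ub_cache_adjust,
 *                .populate = my_populate,
 *                .clear = my_clear,
 *                .bind = my_bind,
 *                .unbind = my_unbind,
 *                .destroy = my_destroy,
 *        };
 *
 *        static drm_ttm_backend_t *my_create_ttm_backend_entry(struct drm_device *dev)
 *        {
 *                struct my_ttm_backend *mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
 *
 *                if (!mbe)
 *                        return NULL;
 *                mbe->base.func = &my_ttm_backend_func;
 *                mbe->base.flags = DRM_BE_FLAG_NEEDS_FREE;
 *                mbe->base.mem_type = DRM_BO_MEM_TT;
 *                return &mbe->base;
 *        }
 */
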
typedef struct drm_ttm {
        struct page **pages;
        uint32_t page_flags;
        unsigned long num_pages;
        unsigned long aper_offset;
        atomic_t vma_count;
        struct drm_device *dev;
        int destroy;
        uint32_t mapping_offset;
        drm_ttm_backend_t *be;
        enum {
                ttm_bound,
                ttm_evicted,
                ttm_unbound,
                ttm_unpopulated,
        } state;

} drm_ttm_t;

extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern void drm_ttm_unbind(drm_ttm_t * ttm);
extern void drm_ttm_evict(drm_ttm_t * ttm);
extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);

/*
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
 * this, which calls this function iff there are no vmas referencing it
 * anymore. Otherwise it is called when the last vma exits.
 */

extern int drm_destroy_ttm(drm_ttm_t * ttm);

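/*
 * DRM_FLAG_MASKED updates only the bits of _old selected by _mask, taking
 * their new values from _new, and leaves all other bits of _old untouched.
 * For example, _old = 0xF0, _new = 0x0F, _mask = 0x0C gives _old = 0xFC:
 * bits 2 and 3 are copied from _new, the remaining bits keep their old values.
 */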
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}

#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)

/*
 * Page flags.
 */

#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED     0x02
#define DRM_TTM_PAGE_BOUND    0x04
#define DRM_TTM_PAGE_PRESENT  0x08
#define DRM_TTM_PAGE_VMALLOC  0x10

/***************************************************
 * Buffer objects. (drm_bo.c, drm_bo_move.c)
 */

typedef struct drm_bo_mem_reg {
        drm_mm_node_t *mm_node;
        unsigned long size;
        unsigned long num_pages;
        uint32_t page_alignment;
        uint32_t mem_type;
        uint32_t flags;
        uint32_t mask;
} drm_bo_mem_reg_t;

typedef struct drm_buffer_object {
        struct drm_device *dev;
        drm_user_object_t base;

        /*
         * If there is a possibility that the usage variable is zero,
         * then dev->struct_mutex should be locked before incrementing it.
         */

        atomic_t usage;
        unsigned long buffer_start;
        drm_bo_type_t type;
        unsigned long offset;
        atomic_t mapped;
        drm_bo_mem_reg_t mem;

        struct list_head lru;
        struct list_head ddestroy;

        uint32_t fence_type;
        uint32_t fence_class;
        drm_fence_object_t *fence;
        uint32_t priv_flags;
        wait_queue_head_t event_queue;
        struct mutex mutex;

        /* For pinned buffers */
        drm_mm_node_t *pinned_node;
        uint32_t pinned_mem_type;
        struct list_head pinned_lru;

        /* For vm */

        drm_ttm_t *ttm;
        drm_map_list_t map_list;
        uint32_t memory_type;
        unsigned long bus_offset;
        uint32_t vm_flags;
        void *iomap;

#ifdef DRM_ODD_MM_COMPAT
        /* dev->struct_mutex only protected. */
        struct list_head vma_list;
        struct list_head p_mm_list;
#endif

} drm_buffer_object_t;

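/*
 * Example (illustrative sketch only) of the usage rule documented in the
 * structure above: when a buffer object is found through a user object
 * lookup, it may be in the process of being destroyed and its usage count
 * could be zero, so the reference must be taken with dev->struct_mutex held.
 * "dev", "priv" and "handle" are assumed to come from the caller.
 *
 *        drm_user_object_t *uo;
 *        drm_buffer_object_t *bo = NULL;
 *
 *        mutex_lock(&dev->struct_mutex);
 *        uo = drm_lookup_user_object(priv, handle);
 *        if (uo && uo->type == drm_buffer_type) {
 *                bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
 *                atomic_inc(&bo->usage);
 *        }
 *        mutex_unlock(&dev->struct_mutex);
 *
 *        ...
 *
 *        mutex_lock(&dev->struct_mutex);
 *        drm_bo_usage_deref_locked(bo);
 *        mutex_unlock(&dev->struct_mutex);
 */
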
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED  0x00000002

typedef struct drm_mem_type_manager {
        int has_type;
        int use_type;
        drm_mm_t manager;
        struct list_head lru;
        struct list_head pinned;
        uint32_t flags;
        uint32_t drm_bus_maptype;
        unsigned long io_offset;
        unsigned long io_size;
        void *io_addr;
} drm_mem_type_manager_t;

#define _DRM_FLAG_MEMTYPE_FIXED     0x00000001  /* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE  0x00000002  /* Memory mappable */
#define _DRM_FLAG_MEMTYPE_CACHED    0x00000004  /* Cached binding */
#define _DRM_FLAG_NEEDS_IOREMAP     0x00000008  /* Fixed memory needs ioremap
                                                   before kernel access. */
#define _DRM_FLAG_MEMTYPE_CMA       0x00000010  /* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020  /* Select caching */

typedef struct drm_buffer_manager {
        struct mutex init_mutex;
        struct mutex evict_mutex;
        int nice_mode;
        int initialized;
        drm_file_t *last_to_validate;
        drm_mem_type_manager_t man[DRM_BO_MEM_TYPES];
        struct list_head unfenced;
        struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        struct work_struct wq;
#else
        struct delayed_work wq;
#endif
        uint32_t fence_type;
        unsigned long cur_pages;
        atomic_t count;
} drm_buffer_manager_t;

typedef struct drm_bo_driver {
        const uint32_t *mem_type_prio;
        const uint32_t *mem_busy_prio;
        uint32_t num_mem_type_prio;
        uint32_t num_mem_busy_prio;
        drm_ttm_backend_t *(*create_ttm_backend_entry)
         (struct drm_device * dev);
        int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type);
        int (*invalidate_caches) (struct drm_device * dev, uint32_t flags);
        int (*init_mem_type) (struct drm_device * dev, uint32_t type,
                              drm_mem_type_manager_t * man);
        uint32_t (*evict_mask) (struct drm_buffer_object *bo);
        int (*move) (struct drm_buffer_object * bo,
                     int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
} drm_bo_driver_t;
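
/*
 * Example (illustrative sketch only) of a driver filling in its
 * drm_bo_driver_t. All "my_*" callbacks are hypothetical; DRM_BO_MEM_VRAM,
 * DRM_BO_MEM_TT and DRM_BO_MEM_LOCAL are assumed to be the memory types the
 * priority lists are ordered by, from most- to least-preferred placement.
 *
 *        static const uint32_t my_mem_prio[] = {
 *                DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
 *        };
 *
 *        static drm_bo_driver_t my_bo_driver = {
 *                .mem_type_prio = my_mem_prio,
 *                .mem_busy_prio = my_mem_prio,
 *                .num_mem_type_prio = ARRAY_SIZE(my_mem_prio),
 *                .num_mem_busy_prio = ARRAY_SIZE(my_mem_prio),
 *                .create_ttm_backend_entry = my_create_ttm_backend_entry,
 *                .fence_type = my_fence_type,
 *                .invalidate_caches = my_invalidate_caches,
 *                .init_mem_type = my_init_mem_type,
 *                .evict_mask = my_evict_mask,
 *                .move = my_move,
 *        };
 */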

/*
 * buffer objects (drm_bo.c)
 */

extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
                             drm_bo_mem_reg_t * mem,
                             unsigned long *bus_base,
                             unsigned long *bus_offset,
                             unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);

extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
extern int drm_fence_buffer_objects(drm_file_t * priv,
                                    struct list_head *list,
                                    uint32_t fence_flags,
                                    drm_fence_object_t * fence,
                                    drm_fence_object_t ** used_fence);
extern void drm_bo_add_to_lru(drm_buffer_object_t * bo);
extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
                       int no_wait);
extern int drm_bo_mem_space(drm_buffer_object_t * bo,
                            drm_bo_mem_reg_t * mem, int no_wait);
extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
                              int no_wait, int move_unfenced);

/*
 * Buffer object memory move helpers.
 * drm_bo_move.c
 */

extern int drm_bo_move_ttm(drm_buffer_object_t * bo,
                           int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
extern int drm_bo_move_memcpy(drm_buffer_object_t * bo,
                              int evict,
                              int no_wait, drm_bo_mem_reg_t * new_mem);
extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
                                     int evict,
                                     int no_wait,
                                     uint32_t fence_class,
                                     uint32_t fence_type,
                                     uint32_t fence_flags,
                                     drm_bo_mem_reg_t * new_mem);

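/*
 * Example (illustrative sketch only) of how a driver's move() callback can
 * use the helpers above: try a hardware blit and fall back to
 * drm_bo_move_memcpy() when the blitter cannot handle the transfer.
 * my_move_blit() is a hypothetical driver function.
 *
 *        static int my_move(drm_buffer_object_t *bo, int evict, int no_wait,
 *                           drm_bo_mem_reg_t *new_mem)
 *        {
 *                if (my_move_blit(bo, evict, no_wait, new_mem) == 0)
 *                        return 0;
 *
 *                return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 *        }
 */
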
#endif