/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#ifndef _DRM_OBJECTS_H
#define _DRM_OBJECTS_H

/***************************************************
 * User space objects. (drm_object.c)
 ***************************************************/

#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
typedef enum {
	drm_fence_type,
	drm_buffer_type,
	drm_ttm_type
	/*
	 * Add other user space object types here.
	 */
} drm_object_type_t;
/*
 * A user object is a structure that helps the drm hand out user handles
 * to kernel-internal objects and keep track of these objects so that
 * they can be destroyed, for example when the user-space process exits.
 * Designed to be accessible using a user-space 32-bit handle.
 */
typedef struct drm_user_object {
	drm_hash_item_t hash;
	struct list_head list;
	drm_object_type_t type;
	atomic_t refcount;
	int shareable;
	drm_file_t *owner;
	void (*ref_struct_locked) (drm_file_t * priv,
				   struct drm_user_object * obj,
				   drm_ref_t ref_action);
	void (*unref) (drm_file_t * priv, struct drm_user_object * obj,
		       drm_ref_t unref_action);
	void (*remove) (drm_file_t * priv, struct drm_user_object * obj);
} drm_user_object_t;
/*
 * A ref object is a structure used to keep track of references to user
 * objects, so that the references can be destroyed, for example when the
 * user-space process exits. Designed to be accessible using a pointer
 * to the _user_ object.
 */
typedef struct drm_ref_object {
	drm_hash_item_t hash;
	struct list_head list;
	atomic_t refcount;
	drm_ref_t unref_action;
} drm_ref_object_t;
/*
 * Must be called with the struct_mutex held.
 */

extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
			       int shareable);
/*
 * Must be called with the struct_mutex held.
 */

extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
						 uint32_t key);
/*
 * Must be called with the struct_mutex held. If "item" has been obtained
 * by a call to drm_lookup_user_object, you may not release the
 * struct_mutex before calling drm_remove_user_object. This function may
 * temporarily release the struct_mutex.
 */

extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item);
/*
 * Must be called with the struct_mutex held. May temporarily release it.
 */

extern int drm_add_ref_object(drm_file_t * priv,
			      drm_user_object_t * referenced_object,
			      drm_ref_t ref_action);
/*
 * Must be called with the struct_mutex held.
 */

extern drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
					       drm_user_object_t * referenced_object,
					       drm_ref_t ref_action);
/*
 * Must be called with the struct_mutex held. If "item" has been obtained
 * by a call to drm_lookup_ref_object, you may not release the
 * struct_mutex before calling drm_remove_ref_object. This function may
 * temporarily release the struct_mutex.
 */

extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item);
extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
			       drm_object_type_t type,
			       drm_user_object_t ** object);
extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
				 drm_object_type_t type);
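/*
 * Typical usage, sketched for illustration only (assumes "handle" is a
 * 32-bit token passed in from user space and that _DRM_REF_USE is the
 * ref action from drm.h; error handling abbreviated):
 *
 *	drm_user_object_t *uo;
 *	int ret;
 *
 *	mutex_lock(&dev->struct_mutex);
 *	uo = drm_lookup_user_object(priv, handle);
 *	if (!uo || uo->type != drm_fence_type) {
 *		mutex_unlock(&dev->struct_mutex);
 *		return -EINVAL;
 *	}
 *	ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
 *	mutex_unlock(&dev->struct_mutex);
 *	return ret;
 */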
/***************************************************
 * Fence objects. (drm_fence.c)
 ***************************************************/
typedef struct drm_fence_object {
	drm_user_object_t base;
	atomic_t usage;

	/*
	 * The below three fields are protected by the fence manager spinlock.
	 */

	struct list_head ring;
	int class;
	uint32_t native_type;
	uint32_t type;
	uint32_t signaled;
	uint32_t sequence;
	uint32_t flush_mask;
	uint32_t submitted_flush;
} drm_fence_object_t;
#define _DRM_FENCE_CLASSES 8
#define _DRM_FENCE_TYPE_EXE 0x00

typedef struct drm_fence_class_manager {
	struct list_head ring;
	uint32_t pending_flush;
	wait_queue_head_t fence_queue;
	int pending_exe_flush;
	uint32_t last_exe_flush;
	uint32_t exe_flush_sequence;
} drm_fence_class_manager_t;
typedef struct drm_fence_manager {
	int initialized;
	rwlock_t lock;
	drm_fence_class_manager_t class[_DRM_FENCE_CLASSES];
	uint32_t num_classes;
	atomic_t count;
} drm_fence_manager_t;
typedef struct drm_fence_driver {
	uint32_t num_classes;
	uint32_t wrap_diff;
	uint32_t flush_diff;
	uint32_t sequence_mask;
	int lazy_capable;
	int (*has_irq) (struct drm_device * dev, uint32_t class,
			uint32_t flags);
	int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
		     uint32_t * breadcrumb, uint32_t * native_type);
	void (*poke_flush) (struct drm_device * dev, uint32_t class);
} drm_fence_driver_t;
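/*
 * A driver advertises its fence capabilities through a static instance
 * of the struct above. A minimal sketch; the foo_* callbacks and the
 * wrap/flush window values are hypothetical, not part of this API:
 *
 *	static drm_fence_driver_t foo_fence_driver = {
 *		.num_classes = 1,
 *		.wrap_diff = (1U << 30),
 *		.flush_diff = (1U << 29),
 *		.sequence_mask = 0xffffffffU,
 *		.lazy_capable = 1,
 *		.has_irq = foo_fence_has_irq,
 *		.emit = foo_fence_emit,
 *		.poke_flush = foo_fence_poke_flush,
 *	};
 */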
extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
			      uint32_t sequence, uint32_t type);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
				uint32_t sequence);
extern int drm_fence_object_flush(struct drm_device *dev,
				  drm_fence_object_t * fence, uint32_t type);
extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
extern void drm_fence_usage_deref_locked(struct drm_device *dev,
					 drm_fence_object_t * fence);
extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
					   drm_fence_object_t * fence);
extern int drm_fence_object_wait(struct drm_device *dev,
				 drm_fence_object_t * fence,
				 int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
				   uint32_t fence_flags, uint32_t class,
				   drm_fence_object_t ** c_fence);
extern int drm_fence_add_user_object(drm_file_t * priv,
				     drm_fence_object_t * fence, int shareable);
extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS);
extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS);
extern int drm_fence_reference_ioctl(DRM_IOCTL_ARGS);
extern int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS);
extern int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS);
extern int drm_fence_flush_ioctl(DRM_IOCTL_ARGS);
extern int drm_fence_wait_ioctl(DRM_IOCTL_ARGS);
extern int drm_fence_emit_ioctl(DRM_IOCTL_ARGS);
extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS);
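/*
 * In-kernel fence usage, sketched under the assumption that the caller
 * runs in a driver submission path and that DRM_FENCE_FLAG_EMIT is the
 * emit-on-create flag from drm.h (error handling abbreviated):
 *
 *	drm_fence_object_t *fence;
 *	int ret;
 *
 *	ret = drm_fence_object_create(dev, _DRM_FENCE_TYPE_EXE,
 *				      DRM_FENCE_FLAG_EMIT, 0, &fence);
 *	if (ret)
 *		return ret;
 *	ret = drm_fence_object_wait(dev, fence, 1, 0, _DRM_FENCE_TYPE_EXE);
 *	drm_fence_usage_deref_unlocked(dev, fence);
 *	return ret;
 */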
/**************************************************
 * TTM memory management. (drm_ttm.c)
 **************************************************/

/*
 * The TTM backend GTT interface (in our case AGP). Any similar type of
 * device (PCIE?) needs only to implement these functions to be usable
 * with the TTM interface. The AGP backend implementation lives in
 * drm_agpsupport.c and basically maps these calls to the functions
 * available in agpgart. Each drm device driver gets an additional
 * function pointer that creates these types, so that the device can
 * choose the correct aperture (multiple AGP apertures, etc.). Most
 * device drivers will let this point to the standard AGP implementation.
 */
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
struct drm_ttm_backend;
typedef struct drm_ttm_backend_func {
	int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
	int (*populate) (struct drm_ttm_backend * backend,
			 unsigned long num_pages, struct page ** pages);
	void (*clear) (struct drm_ttm_backend * backend);
	int (*bind) (struct drm_ttm_backend * backend,
		     unsigned long offset, int cached);
	int (*unbind) (struct drm_ttm_backend * backend);
	void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_func_t;
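/*
 * A backend implementation fills in the table above; the AGP backend in
 * drm_agpsupport.c is the canonical example. Sketch with hypothetical
 * foo_* callbacks:
 *
 *	static drm_ttm_backend_func_t foo_ttm_backend_func = {
 *		.needs_ub_cache_adjust = foo_needs_ub_cache_adjust,
 *		.populate = foo_populate,
 *		.clear = foo_clear,
 *		.bind = foo_bind,
 *		.unbind = foo_unbind,
 *		.destroy = foo_destroy,
 *	};
 */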
typedef struct drm_ttm_backend {
	uint32_t flags;
	int mem_type;
	drm_ttm_backend_func_t *func;
} drm_ttm_backend_t;
typedef struct drm_ttm {
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	unsigned long aper_offset;
	atomic_t vma_count;
	struct drm_device *dev;
	int destroy;
	uint32_t mapping_offset;
	drm_ttm_backend_t *be;
	enum {
		ttm_bound,
		ttm_evicted,
		ttm_unbound,
		ttm_unpopulated,
	} state;
} drm_ttm_t;
extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern void drm_ttm_unbind(drm_ttm_t * ttm);
extern void drm_ttm_evict(drm_ttm_t * ttm);
extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
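/*
 * Create-and-bind sketch (illustrative only; "size" and "aper_offset"
 * are caller-provided, the second drm_bind_ttm() argument requests a
 * cached binding, error handling abbreviated):
 *
 *	drm_ttm_t *ttm;
 *	int ret;
 *
 *	ttm = drm_ttm_init(dev, size);
 *	if (!ttm)
 *		return -ENOMEM;
 *	ret = drm_bind_ttm(ttm, 1, aper_offset);
 *	if (ret)
 *		drm_destroy_ttm(ttm);
 */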
/*
 * Destroy a ttm. The user normally calls drmRmMap or a similar ioctl to
 * do this, which calls this function if there are no vmas referencing
 * the ttm anymore; otherwise it is called when the last vma exits.
 */

extern int drm_destroy_ttm(drm_ttm_t * ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
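/*
 * DRM_FLAG_MASKED merges the _mask-selected bits of _new into _old and
 * leaves the remaining bits of _old untouched. For example:
 *
 *	uint32_t flags = 0xf0;
 *	DRM_FLAG_MASKED(flags, 0x0f, 0x03);
 *
 * flags is now 0xf3.
 */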
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)

#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED 0x02
#define DRM_TTM_PAGE_BOUND 0x04
#define DRM_TTM_PAGE_PRESENT 0x08
#define DRM_TTM_PAGE_VMALLOC 0x10
/***************************************************
 * Buffer objects. (drm_bo.c, drm_bo_move.c)
 ***************************************************/
typedef struct drm_bo_mem_reg {
	drm_mm_node_t *mm_node;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t flags;
	uint32_t mask;
} drm_bo_mem_reg_t;
typedef struct drm_buffer_object {
	struct drm_device *dev;
	drm_user_object_t base;

	/*
	 * If there is a possibility that the usage variable is zero,
	 * then dev->struct_mutex should be locked before incrementing it.
	 */

	atomic_t usage;
	unsigned long buffer_start;
	drm_bo_type_t type;
	unsigned long offset;
	atomic_t mapped;
	drm_bo_mem_reg_t mem;

	struct list_head lru;
	struct list_head ddestroy;

	uint32_t fence_type;
	uint32_t fence_class;
	drm_fence_object_t *fence;
	uint32_t priv_flags;
	wait_queue_head_t event_queue;
	struct mutex mutex;

	/* For pinned buffers */
	drm_mm_node_t *pinned_node;
	uint32_t pinned_mem_type;
	struct list_head pinned_lru;

	/* For vm */
	drm_ttm_t *ttm;
	drm_map_list_t map_list;
	uint32_t memory_type;
	unsigned long bus_offset;
	uint32_t vm_flags;
	void *iomap;

#ifdef DRM_ODD_MM_COMPAT
	/* Protected only by dev->struct_mutex. */
	struct list_head vma_list;
	struct list_head p_mm_list;
#endif

} drm_buffer_object_t;
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
typedef struct drm_mem_type_manager {
	int has_type;
	int use_type;
	drm_mm_t manager;
	struct list_head lru;
	struct list_head pinned;
	uint32_t flags;
	uint32_t drm_bus_maptype;
	unsigned long io_offset;
	unsigned long io_size;
	void *io_addr;
} drm_mem_type_manager_t;
#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001	/* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002	/* Memory mappable */
#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004	/* Cached binding */
#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008	/* Fixed memory needs ioremap
						   before kernel access. */
#define _DRM_FLAG_MEMTYPE_CMA 0x00000010	/* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020	/* Select caching */
typedef struct drm_buffer_manager {
	struct mutex init_mutex;
	struct mutex evict_mutex;
	int nice_mode;
	int initialized;
	drm_file_t *last_to_validate;
	drm_mem_type_manager_t man[DRM_BO_MEM_TYPES];
	struct list_head unfenced;
	struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct work_struct wq;
#else
	struct delayed_work wq;
#endif
	uint32_t fence_type;
	unsigned long cur_pages;
	atomic_t count;
} drm_buffer_manager_t;
typedef struct drm_bo_driver {
	const uint32_t *mem_type_prio;
	const uint32_t *mem_busy_prio;
	uint32_t num_mem_type_prio;
	uint32_t num_mem_busy_prio;
	drm_ttm_backend_t *(*create_ttm_backend_entry)
	 (struct drm_device * dev);
	int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class,
			   uint32_t * type);
	int (*invalidate_caches) (struct drm_device * dev, uint32_t flags);
	int (*init_mem_type) (struct drm_device * dev, uint32_t type,
			      drm_mem_type_manager_t * man);
	uint32_t (*evict_mask) (struct drm_buffer_object *bo);
	int (*move) (struct drm_buffer_object * bo,
		     int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
} drm_bo_driver_t;
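/*
 * Illustrative only: a driver that prefers VRAM, falls back to the TT
 * aperture and finally to local system memory could set up its
 * placement priority arrays as follows (memory-type codes assumed to be
 * the DRM_BO_MEM_* values from drm.h):
 *
 *	static uint32_t foo_mem_prio[] = {
 *		DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
 *	};
 *	static uint32_t foo_busy_prio[] = {
 *		DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
 *	};
 */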
/*
 * buffer objects (drm_bo.c)
 */
extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
			     drm_bo_mem_reg_t * mem,
			     unsigned long *bus_base,
			     unsigned long *bus_offset,
			     unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);

extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
extern int drm_fence_buffer_objects(drm_file_t * priv,
				    struct list_head *list,
				    uint32_t fence_flags,
				    drm_fence_object_t * fence,
				    drm_fence_object_t ** used_fence);
extern void drm_bo_add_to_lru(drm_buffer_object_t * bo);
extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
		       int no_wait);
extern int drm_bo_mem_space(drm_buffer_object_t * bo,
			    drm_bo_mem_reg_t * mem, int no_wait);
extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
			      int no_wait, int move_unfenced);
/*
 * Buffer object memory move helpers. (drm_bo_move.c)
 */
extern int drm_bo_move_ttm(drm_buffer_object_t * bo,
			   int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
extern int drm_bo_move_memcpy(drm_buffer_object_t * bo,
			      int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
				     int evict,
				     int no_wait,
				     uint32_t fence_class,
				     uint32_t fence_type,
				     uint32_t fence_flags,
				     drm_bo_mem_reg_t * new_mem);
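/*
 * A driver's move() hook will typically try an accelerated copy first
 * and fall back to the generic helpers above. Sketch with hypothetical
 * foo_can_blit()/foo_move_blit() helpers:
 *
 *	static int foo_bo_move(drm_buffer_object_t *bo, int evict,
 *			       int no_wait, drm_bo_mem_reg_t *new_mem)
 *	{
 *		if (foo_can_blit(bo, new_mem))
 *			return foo_move_blit(bo, evict, no_wait, new_mem);
 *		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 *	}
 */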