/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_OBJECTS_H
#define _DRM_OBJECTS_H

struct drm_device;
struct drm_bo_mem_reg;

/***************************************************
 * User space objects. (drm_object.c)
 */

#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)

enum drm_object_type {
	drm_fence_type,
	drm_buffer_type,
	drm_lock_type,
	/*
	 * Add other user space object types here.
	 */
	drm_driver_type0 = 256,
	drm_driver_type1,
	drm_driver_type2,
	drm_driver_type3,
	drm_driver_type4
};

/*
 * A user object is a structure that helps the drm hand out user handles
 * to kernel-internal objects and keep track of those objects so that
 * they can be destroyed, for example when the user space process exits.
 * Designed to be accessible using a user space 32-bit handle.
 */

struct drm_user_object {
	struct drm_hash_item hash;
	struct list_head list;
	enum drm_object_type type;
	atomic_t refcount;
	int shareable;
	struct drm_file *owner;
	void (*ref_struct_locked) (struct drm_file * priv,
				   struct drm_user_object * obj,
				   enum drm_ref_type ref_action);
	void (*unref) (struct drm_file * priv, struct drm_user_object * obj,
		       enum drm_ref_type unref_action);
	void (*remove) (struct drm_file * priv, struct drm_user_object * obj);
};
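
/*
 * Illustrative sketch (not part of the original header): a driver object
 * typically embeds a struct drm_user_object and recovers the containing
 * structure from a lookup result with drm_user_object_entry(). The
 * my_driver_object type and the handle value below are hypothetical.
 *
 *	struct my_driver_object {
 *		struct drm_user_object base;
 *	};
 *
 *	mutex_lock(&dev->struct_mutex);
 *	uo = drm_lookup_user_object(priv, handle);
 *	if (uo != NULL && uo->type == drm_driver_type0)
 *		obj = drm_user_object_entry(uo, struct my_driver_object, base);
 *	mutex_unlock(&dev->struct_mutex);
 */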

/*
 * A ref object is a structure used to keep track of references to user
 * objects, so that these references can be destroyed, for example when the
 * user space process exits. Designed to be accessible using a pointer to
 * the _user_ object.
 */

struct drm_ref_object {
	struct drm_hash_item hash;
	struct list_head list;
	atomic_t refcount;
	enum drm_ref_type unref_action;
};

/*
 * Must be called with the struct_mutex held.
 */

extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
			       int shareable);

/*
 * Must be called with the struct_mutex held.
 */

extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv,
						      uint32_t key);

/*
 * Must be called with the struct_mutex held. May temporarily release it.
 */

extern int drm_add_ref_object(struct drm_file * priv,
			      struct drm_user_object * referenced_object,
			      enum drm_ref_type ref_action);

/*
 * Must be called with the struct_mutex held.
 */

extern struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
						    struct drm_user_object * referenced_object,
						    enum drm_ref_type ref_action);

/*
 * Must be called with the struct_mutex held.
 * If "item" has been obtained by a call to drm_lookup_ref_object, you may not
 * release the struct_mutex before calling drm_remove_ref_object.
 * This function may temporarily release the struct_mutex.
 */

extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item);
extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
			       enum drm_object_type type,
			       struct drm_user_object ** object);
extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
				 enum drm_object_type type);
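
/*
 * Illustrative sketch (not part of the original header): taking and dropping
 * a reference on a shared object by its user space token; the handle value
 * is hypothetical.
 *
 *	ret = drm_user_object_ref(priv, handle, drm_fence_type, &uo);
 *	if (ret)
 *		return ret;
 *	... use the object ...
 *	ret = drm_user_object_unref(priv, handle, drm_fence_type);
 */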

/***************************************************
 * Fence objects. (drm_fence.c)
 */

struct drm_fence_object {
	struct drm_user_object base;
	struct drm_device *dev;
	atomic_t usage;

	/*
	 * The below three fields are protected by the fence manager spinlock.
	 */

	struct list_head ring;
	int fence_class;
	uint32_t native_type;
	uint32_t type;
	uint32_t signaled;
	uint32_t sequence;
	uint32_t flush_mask;
	uint32_t submitted_flush;
	uint32_t error;
};

#define _DRM_FENCE_CLASSES 8
#define _DRM_FENCE_TYPE_EXE 0x00

struct drm_fence_class_manager {
	struct list_head ring;
	uint32_t pending_flush;
	wait_queue_head_t fence_queue;
	int pending_exe_flush;
	uint32_t last_exe_flush;
	uint32_t exe_flush_sequence;
};

struct drm_fence_manager {
	int initialized;
	rwlock_t lock;
	struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
	uint32_t num_classes;
	atomic_t count;
};

struct drm_fence_driver {
	uint32_t num_classes;
	uint32_t wrap_diff;
	uint32_t flush_diff;
	uint32_t sequence_mask;
	int lazy_capable;
	int (*has_irq) (struct drm_device * dev, uint32_t fence_class,
			uint32_t flags);
	int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags,
		     uint32_t * breadcrumb, uint32_t * native_type);
	void (*poke_flush) (struct drm_device * dev, uint32_t fence_class);
};
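
/*
 * Illustrative sketch (not part of the original header): a driver advertises
 * its fence implementation by filling in a static struct drm_fence_driver;
 * the my_* hooks and the wrap/flush constants are hypothetical.
 *
 *	static struct drm_fence_driver my_fence_driver = {
 *		.num_classes = 1,
 *		.wrap_diff = (1U << 30),
 *		.flush_diff = (1U << 29),
 *		.sequence_mask = 0xffffffffU,
 *		.lazy_capable = 1,
 *		.has_irq = my_fence_has_irq,
 *		.emit = my_fence_emit,
 *		.poke_flush = my_fence_poke_flush,
 *	};
 */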

extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
			      uint32_t sequence, uint32_t type, uint32_t error);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
				uint32_t sequence);
extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type);
extern int drm_fence_object_signaled(struct drm_fence_object * fence,
				     uint32_t type, int flush);
extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence);
extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
					 struct drm_fence_object *src);
extern int drm_fence_object_wait(struct drm_fence_object * fence,
				 int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
				   uint32_t fence_flags, uint32_t fence_class,
				   struct drm_fence_object ** c_fence);
extern int drm_fence_object_emit(struct drm_fence_object * fence,
				 uint32_t fence_flags, uint32_t class,
				 uint32_t type);
extern void drm_fence_fill_arg(struct drm_fence_object *fence,
			       struct drm_fence_arg *arg);

extern int drm_fence_add_user_object(struct drm_file * priv,
				     struct drm_fence_object * fence, int shareable);
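
/*
 * Illustrative sketch (not part of the original header): a typical
 * kernel-side fence life cycle after command submission; fence_type,
 * fence_flags and fence_class are hypothetical values.
 *
 *	struct drm_fence_object *fence;
 *
 *	ret = drm_fence_object_create(dev, fence_type, fence_flags,
 *				      fence_class, &fence);
 *	if (ret)
 *		return ret;
 *	ret = drm_fence_object_wait(fence, 1, 0, fence->type);
 *	drm_fence_usage_deref_unlocked(&fence);
 */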

extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);

/**************************************************
 * TTMs. (drm_ttm.c)
 */

/*
 * The ttm backend GTT interface. (In our case AGP.)
 * Any similar type of device (PCIe?) needs only to implement these
 * functions to be usable with the TTM interface.
 * The AGP backend implementation lives in drm_agpsupport.c and basically
 * maps these calls to the available functions in agpgart.
 * Each drm device driver gets an additional function pointer that creates
 * these types, so that the device can choose the correct aperture
 * (multiple AGP apertures, etc.).
 * Most device drivers will let this point to the standard AGP
 * implementation.
 */

#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002

struct drm_ttm_backend;
struct drm_ttm_backend_func {
	int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
	int (*populate) (struct drm_ttm_backend * backend,
			 unsigned long num_pages, struct page ** pages);
	void (*clear) (struct drm_ttm_backend * backend);
	int (*bind) (struct drm_ttm_backend * backend,
		     struct drm_bo_mem_reg * bo_mem);
	int (*unbind) (struct drm_ttm_backend * backend);
	void (*destroy) (struct drm_ttm_backend * backend);
};
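
/*
 * Illustrative sketch (not part of the original header): a backend fills in
 * drm_ttm_backend_func with its own hooks; the my_* functions are
 * hypothetical.
 *
 *	static struct drm_ttm_backend_func my_backend_func = {
 *		.needs_ub_cache_adjust = my_needs_ub_cache_adjust,
 *		.populate = my_populate,
 *		.clear = my_clear,
 *		.bind = my_bind,
 *		.unbind = my_unbind,
 *		.destroy = my_destroy,
 *	};
 */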

typedef struct drm_ttm_backend {
	struct drm_device *dev;
	uint32_t flags;
	struct drm_ttm_backend_func *func;
} drm_ttm_backend_t;

struct drm_ttm {
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	atomic_t vma_count;
	struct drm_device *dev;
	int destroy;
	uint32_t mapping_offset;
	struct drm_ttm_backend *be;
	enum {
		ttm_bound,
		ttm_evicted,
		ttm_unbound,
		ttm_unpopulated,
	} state;
};

extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem);
extern void drm_ttm_unbind(struct drm_ttm * ttm);
extern void drm_ttm_evict(struct drm_ttm * ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
extern void drm_ttm_cache_flush(void);
extern int drm_ttm_populate(struct drm_ttm * ttm);

/*
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
 * this, which calls this function iff there are no vmas referencing it
 * anymore. Otherwise it is called when the last vma exits.
 */

extern int drm_destroy_ttm(struct drm_ttm * ttm);

#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
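
/*
 * DRM_FLAG_MASKED sets the bits of _old selected by _mask to the
 * corresponding bits of _new, leaving all other bits untouched. A worked
 * example (not part of the original header): with _old = 0x0f, _new = 0xf0
 * and _mask = 0x30, (_old ^ _new) = 0xff, which masked gives 0x30, so
 * _old ^= 0x30 yields 0x3f: bits 4-5 now come from _new, the rest from _old.
 */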

#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)

#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED 0x02
#define DRM_TTM_PAGE_BOUND 0x04
#define DRM_TTM_PAGE_PRESENT 0x08
#define DRM_TTM_PAGE_VMALLOC 0x10

/***************************************************
 * Buffer objects. (drm_bo.c, drm_bo_move.c)
 */

struct drm_bo_mem_reg {
	struct drm_mm_node *mm_node;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint64_t flags;
	uint64_t mask;
	uint32_t desired_tile_stride;
	uint32_t hw_tile_stride;
};

enum drm_bo_type {
	drm_bo_type_dc,
	drm_bo_type_user,
	drm_bo_type_kernel, /* for initial kernel allocations */
};

struct drm_buffer_object {
	struct drm_device *dev;
	struct drm_user_object base;

	/*
	 * If there is a possibility that the usage variable is zero,
	 * then dev->struct_mutex should be locked before incrementing it.
	 */

	atomic_t usage;
	unsigned long buffer_start;
	enum drm_bo_type type;
	unsigned long offset;
	atomic_t mapped;
	struct drm_bo_mem_reg mem;

	struct list_head lru;
	struct list_head ddestroy;

	uint32_t fence_type;
	uint32_t fence_class;
	uint32_t new_fence_type;
	uint32_t new_fence_class;
	struct drm_fence_object *fence;
	uint32_t priv_flags;
	wait_queue_head_t event_queue;
	struct mutex mutex;
	unsigned long num_pages;

	/* For pinned buffers */
	struct drm_mm_node *pinned_node;
	uint32_t pinned_mem_type;
	struct list_head pinned_lru;

	/* Members protected by the bo mapping mutex */
	struct drm_map_list map_list;
	uint32_t memory_type;
	unsigned long bus_offset;
	uint32_t vm_flags;
	void *iomap;

#ifdef DRM_ODD_MM_COMPAT
	/* dev->struct_mutex only protected. */
	struct list_head vma_list;
	struct list_head p_mm_list;
#endif

};

#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002

struct drm_mem_type_manager {
	int has_type;
	int use_type;
	struct drm_mm manager;
	struct list_head lru;
	struct list_head pinned;
	uint32_t flags;
	uint32_t drm_bus_maptype;
	unsigned long gpu_offset;
	unsigned long io_offset;
	unsigned long io_size;
	void *io_addr;
};

struct drm_bo_lock {
	struct drm_user_object base;
	wait_queue_head_t queue;
	atomic_t write_lock_pending;
	atomic_t readers;
};

#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
					      before kernel access. */
#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */

struct drm_buffer_manager {
	struct drm_bo_lock bm_lock;
	struct mutex evict_mutex;
	int nice_mode;
	int initialized;
	struct drm_file *last_to_validate;
	struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
	struct list_head unfenced;
	struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct work_struct wq;
#else
	struct delayed_work wq;
#endif
	uint32_t fence_type;
	unsigned long cur_pages;
	atomic_t count;
};

struct drm_bo_driver {
	const uint32_t *mem_type_prio;
	const uint32_t *mem_busy_prio;
	uint32_t num_mem_type_prio;
	uint32_t num_mem_busy_prio;
	struct drm_ttm_backend *(*create_ttm_backend_entry)
	 (struct drm_device * dev);
	int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
			   uint32_t *type);
	int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
	int (*init_mem_type) (struct drm_device * dev, uint32_t type,
			      struct drm_mem_type_manager * man);
	uint32_t (*evict_mask) (struct drm_buffer_object *bo);
	int (*move) (struct drm_buffer_object * bo,
		     int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
};
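
/*
 * Illustrative sketch (not part of the original header): a driver fills in
 * struct drm_bo_driver with its memory-placement priorities and hooks; the
 * my_* identifiers are hypothetical, the DRM_BO_MEM_* values come from drm.h.
 *
 *	static uint32_t my_mem_prio[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT,
 *					 DRM_BO_MEM_LOCAL};
 *
 *	static struct drm_bo_driver my_bo_driver = {
 *		.mem_type_prio = my_mem_prio,
 *		.mem_busy_prio = my_mem_prio,
 *		.num_mem_type_prio = ARRAY_SIZE(my_mem_prio),
 *		.num_mem_busy_prio = ARRAY_SIZE(my_mem_prio),
 *		.create_ttm_backend_entry = my_create_ttm_backend,
 *		.fence_type = my_fence_type,
 *		.invalidate_caches = my_invalidate_caches,
 *		.init_mem_type = my_init_mem_type,
 *		.evict_mask = my_evict_mask,
 *		.move = my_move,
 *	};
 */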

/*
 * Buffer objects. (drm_bo.c)
 */

extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
			     struct drm_bo_mem_reg * mem,
			     unsigned long *bus_base,
			     unsigned long *bus_offset,
			     unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);

extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo);
extern void drm_putback_buffer_objects(struct drm_device *dev);
extern int drm_fence_buffer_objects(struct drm_device * dev,
				    struct list_head *list,
				    uint32_t fence_flags,
				    struct drm_fence_object * fence,
				    struct drm_fence_object ** used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
				    enum drm_bo_type type, uint64_t mask,
				    uint32_t hint, uint32_t page_alignment,
				    unsigned long buffer_start,
				    struct drm_buffer_object **bo);
extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
		       int no_wait);
extern int drm_bo_mem_space(struct drm_buffer_object * bo,
			    struct drm_bo_mem_reg * mem, int no_wait);
extern int drm_bo_move_buffer(struct drm_buffer_object * bo,
			      uint64_t new_mem_flags,
			      int no_wait, int move_unfenced);
extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type);
extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
			  unsigned long p_offset, unsigned long p_size);
extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
				  uint32_t fence_class, uint64_t flags,
				  uint64_t mask, uint32_t hint,
				  int use_old_fence_class,
				  struct drm_bo_info_rep * rep,
				  struct drm_buffer_object **bo_rep);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv,
							  uint32_t handle,
							  int check_owner);
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
			      uint64_t flags, uint64_t mask, uint32_t hint,
			      uint32_t fence_class,
			      int no_wait,
			      struct drm_bo_info_rep *rep);
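
/*
 * Illustrative sketch (not part of the original header): creating a kernel
 * buffer object and releasing the reference; the size and the DRM_BO_FLAG_*
 * mask (from drm.h) are picked for the example, hint, page_alignment and
 * buffer_start are left at zero.
 *
 *	struct drm_buffer_object *bo;
 *
 *	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *				       DRM_BO_FLAG_MEM_TT,
 *				       0, 0, 0, &bo);
 *	if (ret)
 *		return ret;
 *	... use the buffer ...
 *	drm_bo_usage_deref_unlocked(&bo);
 */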

/*
 * Buffer object memory move- and map helpers. (drm_bo_move.c)
 */

extern int drm_bo_move_ttm(struct drm_buffer_object * bo,
			   int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
extern int drm_bo_move_memcpy(struct drm_buffer_object * bo,
			      int evict,
			      int no_wait, struct drm_bo_mem_reg * new_mem);
extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
				     int evict,
				     int no_wait,
				     uint32_t fence_class,
				     uint32_t fence_type,
				     uint32_t fence_flags,
				     struct drm_bo_mem_reg * new_mem);
extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
extern unsigned long drm_bo_offset_end(unsigned long offset,
				       unsigned long end);

struct drm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		bo_map_iomap,
		bo_map_vmap,
		bo_map_kmap,
		bo_map_premapped,
	} bo_kmap_type;
};

static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
{
	*is_iomem = (map->bo_kmap_type == bo_map_iomap ||
		     map->bo_kmap_type == bo_map_premapped);
	return map->virtual;
}

extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
		       unsigned long num_pages, struct drm_bo_kmap_obj *map);
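
/*
 * Illustrative sketch (not part of the original header): mapping the first
 * page of a buffer object for CPU access.
 *
 *	struct drm_bo_kmap_obj kmap;
 *	int is_iomem;
 *	void *virtual;
 *
 *	ret = drm_bo_kmap(bo, 0, 1, &kmap);
 *	if (ret)
 *		return ret;
 *	virtual = drm_bmo_virtual(&kmap, &is_iomem);
 *	... access the page, honoring is_iomem ...
 *	drm_bo_kunmap(&kmap);
 */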

struct drm_reg {
	struct list_head head;
	struct drm_fence_object *fence;
	uint32_t fence_type;
	uint32_t new_fence_type;
};

struct drm_reg_manager {
	struct list_head free;
	struct list_head lru;
	struct list_head unfenced;

	int (*reg_reusable)(const struct drm_reg *reg, const void *data);
	void (*reg_destroy)(struct drm_reg *reg);
};

extern int drm_regs_alloc(struct drm_reg_manager *manager,
			  const void *data,
			  uint32_t fence_class,
			  uint32_t fence_type,
			  int interruptible,
			  int no_wait,
			  struct drm_reg **reg);

extern void drm_regs_fence(struct drm_reg_manager *regs,
			   struct drm_fence_object *fence);

extern void drm_regs_free(struct drm_reg_manager *manager);
extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
extern void drm_regs_init(struct drm_reg_manager *manager,
			  int (*reg_reusable)(const struct drm_reg *,
					      const void *),
			  void (*reg_destroy)(struct drm_reg *));
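
/*
 * Illustrative sketch (not part of the original header): setting up a region
 * manager with hypothetical reuse/destroy callbacks before allocating
 * regions from it.
 *
 *	drm_regs_init(&manager, my_reg_reusable, my_reg_destroy);
 */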

/*
 * Simple replacement for the hardware lock on buffer manager init and clean.
 */

extern void drm_bo_init_lock(struct drm_bo_lock *lock);
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
extern int drm_bo_read_lock(struct drm_bo_lock *lock);
extern int drm_bo_write_lock(struct drm_bo_lock *lock,
			     struct drm_file *file_priv);

extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
			       struct drm_file *file_priv);
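
/*
 * Illustrative sketch (not part of the original header): guarding an
 * operation with the read side of the lock, assuming the device's buffer
 * manager is reachable as dev->bm as in the drm core of this era.
 *
 *	ret = drm_bo_read_lock(&dev->bm.bm_lock);
 *	if (ret)
 *		return ret;
 *	... validate buffers ...
 *	drm_bo_read_unlock(&dev->bm.bm_lock);
 */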

#ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex) \
	BUG_ON(!mutex_is_locked(_mutex) || \
	       ((_mutex)->owner != current_thread_info()))
#else
#define DRM_ASSERT_LOCKED(_mutex)
#endif

#endif /* _DRM_OBJECTS_H */