/**
 * \file drm_compat.h
 * Backward compatibility definitions for the Direct Rendering Manager.
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _DRM_COMPAT_H_
#define _DRM_COMPAT_H_

#ifndef minor
#define minor(x) MINOR((x))
#endif
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(x)
#endif
#ifndef preempt_disable
#define preempt_disable()
#define preempt_enable()
#endif
#ifndef pte_offset_map
#define pte_offset_map pte_offset
#define pte_unmap(pte)
#endif
#ifndef module_param
#define module_param(name, type, perm)
#endif
/* Older kernels passed different arguments to interrupt handlers. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
#define DRM_IRQ_ARGS	int irq, void *arg, struct pt_regs *regs
#endif
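/*
 * A minimal sketch (not part of the original header) of a handler written
 * against DRM_IRQ_ARGS: the same definition compiles on both sides of the
 * 2.6.19 boundary, since the pt_regs parameter simply drops out on newer
 * kernels. The handler body and names are hypothetical.
 */
#if 0
static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;

	/* ... read and acknowledge the hardware's interrupt status ... */
	(void) dev;
	return IRQ_HANDLED;
}
#endif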
#ifndef list_for_each_safe
#define list_for_each_safe(pos, n, head)				\
	for (pos = (head)->next, n = pos->next; pos != (head);		\
	     pos = n, n = pos->next)
#endif
#ifndef list_for_each_entry
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
	     prefetch(pos->member.next);				\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
	     prefetch(pos->member.next))
#endif
#ifndef list_for_each_entry_safe
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
	     n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
#endif
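/*
 * A minimal usage sketch (not in the original header): the _safe variant
 * caches the next entry in 'n', so the current entry may be freed while
 * iterating. 'struct example_node' is a hypothetical type.
 */
#if 0
struct example_node {
	struct list_head link;
};

static void example_drain(struct list_head *list)
{
	struct example_node *pos, *n;

	list_for_each_entry_safe(pos, n, list, link) {
		list_del(&pos->link);
		kfree(pos);
	}
}
#endif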
#if !defined(__put_page)
#define __put_page(p) atomic_dec(&(p)->count)
#endif
#if !defined(__GFP_COMP)
/* The compound-page flag does not exist yet; make it a no-op. */
#define __GFP_COMP 0
#endif
#if !defined(IRQF_SHARED)
#define IRQF_SHARED SA_SHIRQ
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
static inline int remap_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t pgprot)
{
	/* Older kernels take a physical address rather than a pfn. */
	return remap_page_range(vma, from, pfn << PAGE_SHIFT, size, pgprot);
}

static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
{
	void *addr;

	/* Unlike the real kcalloc(), no overflow check on size * nmemb. */
	addr = kmalloc(size * nmemb, flags);
	if (addr != NULL)
		memset(addr, 0, size * nmemb);

	return addr;
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
/* No mutex type before 2.6.16; map the mutex API onto semaphores. */
#define mutex_lock down
#define mutex_unlock up

#define mutex semaphore

#define mutex_init(a) sema_init((a), 1)
#endif
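/*
 * A brief sketch (not in the original header): with the shims above, code
 * written against the 2.6.16+ mutex API builds unchanged on semaphore-only
 * kernels. mutex_init() is assumed to have been called at setup time.
 */
#if 0
static struct mutex example_mutex;	/* becomes struct semaphore here */

static void example_critical_section(void)
{
	mutex_lock(&example_mutex);	/* down() on pre-2.6.16 kernels */
	/* ... work protected by the lock ... */
	mutex_unlock(&example_mutex);	/* up() on pre-2.6.16 kernels */
}
#endif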
#ifndef DEFINE_SPINLOCK
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#endif
/* old architectures */
/* sysfs __ATTR macro */
#ifndef __ATTR
#define __ATTR(_name, _mode, _show, _store) {				\
	.attr = { .name = __stringify(_name), .mode = _mode,		\
		  .owner = THIS_MODULE },				\
	.show = _show,							\
	.store = _store,						\
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
/* vmalloc_user() did not exist yet; emulate it with a zeroed vmalloc(). */
#define vmalloc_user(_size) ({						\
	void *tmp = vmalloc(_size);					\
	if (tmp)							\
		memset(tmp, 0, _size);					\
	(tmp);								\
})
#endif
#ifndef list_for_each_entry_safe_reverse
#define list_for_each_entry_safe_reverse(pos, n, head, member)		\
	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
	     n = list_entry(pos->member.prev, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
#endif
#include <linux/mm.h>
#include <asm/page.h>
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
#define DRM_FULL_MM_COMPAT
#endif
/*
 * Flush relevant caches and clear a VMA structure so that page references
 * will cause a page fault. Does not flush TLBs.
 */

extern void drm_clear_vma(struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
/*
 * Return the PTE protection map entries for the VMA flags given by
 * vm_flags. This is a functional interface to the kernel's protection map.
 */

extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
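/*
 * A minimal sketch (not in the original header) of the typical call site:
 * recomputing a vma's page protection after its vm_flags have changed.
 */
#if 0
static void example_refresh_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif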
#ifndef GFP_DMA32
#define GFP_DMA32 GFP_KERNEL
#endif

#ifndef __GFP_DMA32
#define __GFP_DMA32 GFP_KERNEL
#endif
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * The kernel's AGP page-mapping functions are too slow in earlier kernels.
 */

extern int drm_unmap_page_from_agp(struct page *page);
extern int drm_map_page_into_agp(struct page *page);

#define map_page_into_agp drm_map_page_into_agp
#define unmap_page_from_agp drm_unmap_page_from_agp
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);

#define NOPAGE_REFAULT get_nopage_retry()
#endif
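/*
 * A hedged sketch (not in the original header) of how NOPAGE_REFAULT is
 * meant to be used: a pre-2.6.19 nopage handler returns it once the pte
 * has already been filled, so the fault is simply retried. The handler
 * shown is hypothetical.
 */
#if 0
static struct page *example_nopage(struct vm_area_struct *vma,
				   unsigned long address, int *type)
{
	/* ... resolve the fault and fill out the pte ... */
	return NOPAGE_REFAULT;
}
#endif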
#ifndef DRM_FULL_MM_COMPAT

/*
 * For now, just return a dummy page that we've allocated out of
 * static space. The page will be put by do_nopage(), since we've already
 * filled out the pte.
 */

struct fault_data {
	struct vm_area_struct *vma;
	unsigned long address;
	/* (further fault-bookkeeping fields elided) */
};

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
				     unsigned long address,
				     int *type);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
	!defined(DRM_FULL_MM_COMPAT)
extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
				     unsigned long address);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
#endif /* !defined(DRM_FULL_MM_COMPAT) */
#ifdef DRM_ODD_MM_COMPAT

struct drm_buffer_object;

/*
 * Add a vma to the ttm vma list, and the process mm pointer to the
 * ttm mm list. Needs the ttm mutex.
 */

extern int drm_bo_add_vma(struct drm_buffer_object *bo,
			  struct vm_area_struct *vma);

/*
 * Delete a vma and the corresponding mm pointer from the ttm lists.
 * Needs the ttm mutex.
 */

extern void drm_bo_delete_vma(struct drm_buffer_object *bo,
			      struct vm_area_struct *vma);
/*
 * Attempt to lock all relevant mmap_sems for a ttm, while not releasing
 * the ttm mutex. May return -EAGAIN to avoid deadlocks; in that case the
 * caller shall release the ttm mutex, schedule() and try again.
 */

extern int drm_bo_lock_kmm(struct drm_buffer_object *bo);

/*
 * Unlock all relevant mmap_sems for a ttm.
 */

extern void drm_bo_unlock_kmm(struct drm_buffer_object *bo);
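/*
 * A sketch (not in the original header) of the -EAGAIN retry protocol
 * described above. The 'bo->mutex' field name is an assumption made for
 * illustration only.
 */
#if 0
static int example_lock_all_mms(struct drm_buffer_object *bo)
{
	int ret;

	while ((ret = drm_bo_lock_kmm(bo)) == -EAGAIN) {
		mutex_unlock(&bo->mutex);	/* release the ttm mutex */
		schedule();
		mutex_lock(&bo->mutex);		/* and try again */
	}
	return ret;
}
#endif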
/*
 * If the ttm was bound to the aperture, this function shall be called
 * with all relevant mmap_sems held. It deletes the VM_PFNMAP flag from
 * all vmas mapping this ttm, which is needed just after unmapping the
 * vmas' ptes; otherwise do_nopage() will BUG. The function releases the
 * mmap_sems for this ttm.
 */

extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
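/*
 * A hypothetical ordering sketch (not in the original header), following
 * the comment above: the ptes are zapped first, then drm_bo_finish_unmap()
 * clears VM_PFNMAP and releases the mmap_sems. The pte-zapping step is
 * deliberately elided.
 */
#if 0
static void example_unmap_bound(struct drm_buffer_object *bo)
{
	/* All relevant mmap_sems are assumed held at this point, e.g.
	 * via the drm_bo_lock_kmm() retry loop sketched earlier. */

	/* ... unmap the ptes of every vma mapping this ttm ... */

	drm_bo_finish_unmap(bo);	/* drops the mmap_sems */
}
#endif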
/*
 * Remap all vmas of this ttm using io_remap_pfn_range(). We cannot fault
 * these pfns in, because the first fault would set the VM_PFNMAP flag on
 * the vma, making the next fault BUG in do_nopage(). The function
 * releases the mmap_sems for this ttm.
 */

extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
/*
 * Remap a vma for a bound ttm. Call with the ttm mutex held and the
 * relevant mmap_sem locked.
 */

extern int drm_bo_map_bound(struct vm_area_struct *vma);

#endif /* DRM_ODD_MM_COMPAT */
/* FIXME: drop these compat functions once upstreamed (done in 2.6.23). */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
#define DRM_IDR_COMPAT_FN
#endif

#ifdef DRM_IDR_COMPAT_FN
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data);
void idr_remove_all(struct idr *idp);
#endif
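/*
 * A minimal sketch (not in the original header) of the compat IDR helpers,
 * mirroring the 2.6.23 semantics: the callback returns 0 to continue the
 * iteration, and idr_remove_all() empties the tree afterwards.
 */
#if 0
static int example_free_one(int id, void *ptr, void *data)
{
	kfree(ptr);
	return 0;
}

static void example_idr_teardown(struct idr *idp)
{
	idr_for_each(idp, &example_free_one, NULL);
	idr_remove_all(idp);
}
#endif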
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
void *idr_replace(struct idr *idp, void *ptr, int id);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern unsigned long round_jiffies_relative(unsigned long j);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
					    unsigned int devfn);
#endif
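/*
 * A brief usage sketch (not in the original header). The bus/slot numbers
 * are placeholders; the caller owns the returned reference and must drop
 * it with pci_dev_put().
 */
#if 0
static struct pci_dev *example_find_host_bridge(void)
{
	return pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
}
#endif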