if (list_empty(&bm->unfenced))
DRM_DEBUG("Unfenced list was clean\n");
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- unlock_page(bm->dummy_read_page);
-#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
ClearPageReserved(bm->dummy_read_page);
#endif
__free_page(bm->dummy_read_page);
goto out_unlock;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- SetPageLocked(bm->dummy_read_page);
-#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
SetPageReserved(bm->dummy_read_page);
#endif
drm_free_memctl(PAGE_SIZE);
return NULL;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- SetPageLocked(page);
-#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
SetPageReserved(page);
#endif
return page;
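The three hunks above all make the same change: the post-2.6.15 branch that pinned the dummy read page with SetPageLocked()/unlock_page() is dropped, so only kernels older than 2.6.15 still mark the page reserved. A minimal sketch of the resulting pattern, with hypothetical helper names that are not taken from drm_ttm.c:

/*
 * Sketch only: version-gated page reservation as left behind by this patch.
 * On 2.6.15 and newer kernels the page needs no marking at all.
 */
#include <linux/version.h>
#include <linux/mm.h>

static inline void drm_ttm_reserve_page(struct page *page)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	SetPageReserved(page);		/* keep the old VM code away from the page */
#endif
}

static inline void drm_ttm_unreserve_page(struct page *page)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	ClearPageReserved(page);	/* must be cleared before __free_page() */
#endif
}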
static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
{
- struct mm_struct *mm = ttm->user_mm;
int write;
int dirty;
struct page *page;
write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
- down_read(&mm->mmap_sem);
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (page == NULL)
	continue;
if (write && dirty && !PageReserved(page))
- SetPageDirty(page);
+ set_page_dirty_lock(page);
ttm->pages[i] = NULL;
- page_cache_release(page);
+ put_page(page);
}
- up_read(&mm->mmap_sem);
}
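The rewritten free path above no longer needs mmap_sem: set_page_dirty_lock() takes the page lock itself, and put_page() simply drops the reference taken by get_user_pages(), so no mm state is required (which is why ttm->user_mm goes away below). A hedged sketch of the same release pattern in isolation, with a hypothetical helper name:

/*
 * Sketch: releasing one page pinned by get_user_pages(), mirroring the
 * loop above.  No mmap_sem is held; set_page_dirty_lock() locks the page
 * internally before marking it dirty.
 */
#include <linux/mm.h>

static void release_pinned_page(struct page *page, int write, int dirty)
{
	if (write && dirty && !PageReserved(page))
		set_page_dirty_lock(page);	/* safe without mmap_sem */
	put_page(page);				/* drop the get_user_pages() reference */
}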
static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
if (*cur_page) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- unlock_page(*cur_page);
-#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
ClearPageReserved(*cur_page);
#endif
if (page_count(*cur_page) != 1)
BUG_ON(num_pages != ttm->num_pages);
- ttm->user_mm = mm;
ttm->dummy_read_page = dummy_read_page;
ttm->page_flags |= DRM_TTM_PAGE_USER |
((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
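For context, on kernels of this era the user pages are pinned right after these flags are set, using the old get_user_pages() signature that still takes the task and mm explicitly and must run under mmap_sem. A sketch under that assumption; the helper name and error handling are illustrative, not the exact drm_ttm_set_user() body:

/*
 * Sketch: pinning the user range into ttm->pages.  Assumes the pre-4.6
 * get_user_pages(tsk, mm, ...) signature used by kernels of this period.
 */
static int drm_ttm_pin_user_range(struct drm_ttm *ttm, struct task_struct *tsk,
				  unsigned long start, int write)
{
	struct mm_struct *mm = tsk->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, ttm->num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != ttm->num_pages) {
		drm_ttm_free_user_pages(ttm);	/* uses the helper patched above */
		return -ENOMEM;
	}
	return 0;
}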