struct list_head *lru;
int ret;
/* evict all buffers on the LRU - won't evict pinned buffers */
-
+
+ drm_mm_dump(&man->manager);
mutex_lock(&dev->struct_mutex);
do {
lru = &man->lru;
- if (lru->next == lru) {
+redo:
+ if (lru->next == &man->lru) {
DRM_ERROR("lru empty\n");
break;
}
entry = list_entry(lru->next, struct drm_buffer_object, lru);
+
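+	/* buffer contents are discardable: leave it in place and check the next LRU entry */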
+ if (entry->mem.flags & DRM_BO_FLAG_DISCARDABLE) {
+ lru = lru->next;
+ goto redo;
+ }
+
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
- DRM_ERROR("Evicting %p %d\n", entry, entry->num_pages);
ret = drm_bo_evict(entry, mem_type, no_wait);
mutex_unlock(&entry->mutex);
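
The hunk above walks the manager's circular LRU list and, instead of evicting a buffer whose contents are marked discardable, advances the cursor and retries from the next entry. The following is a minimal standalone userspace sketch of that walk-and-skip pattern; the list helpers, struct buffer, and FLAG_DISCARDABLE here are re-implemented for illustration and are not the kernel's own definitions.

#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct buffer {
	struct list_head lru;
	unsigned long flags;	/* bit 0: discardable, for this sketch */
	int id;
};

#define FLAG_DISCARDABLE 0x1UL

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct buffer bufs[4] = {
		{ .flags = 0,                .id = 0 },
		{ .flags = FLAG_DISCARDABLE, .id = 1 },
		{ .flags = 0,                .id = 2 },
		{ .flags = FLAG_DISCARDABLE, .id = 3 },
	};
	struct list_head *lru = &head;
	int i;

	for (i = 0; i < 4; i++)
		list_add_tail(&bufs[i].lru, &head);

	/* Mirror of the patched loop: advance past discardable
	 * entries instead of evicting them. */
	while (lru->next != &head) {
		struct buffer *entry =
			container_of(lru->next, struct buffer, lru);

		if (entry->flags & FLAG_DISCARDABLE) {
			lru = lru->next;	/* skip, try the next one */
			continue;
		}
		printf("would evict buffer %d\n", entry->id);
		lru = lru->next;	/* sketch only: the real code unlinks
					 * the entry and restarts at the head */
	}
	return 0;
}

Note one simplification: in the real loop the evicted buffer is removed from the LRU under dev->struct_mutex, so the walk restarts from the head rather than advancing a cursor as this sketch does.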
*/
#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
+/*
+ * Mask: if set, the buffer contents are discardable
+ * Flags: if set, the buffer contents are discardable on migration
+ */
+#define DRM_BO_FLAG_DISCARDABLE (1ULL << 9)
+
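
As a reminder of how the mask/flags pairing in this interface works, here is a small hedged sketch: the mask names the bits the caller is changing and the flags supply their values. apply_bo_flags() is a hypothetical stand-in for illustration, not a real DRM entry point.

#include <stdint.h>
#include <stdio.h>

#define DRM_BO_FLAG_NO_MOVE      (1ULL << 8)
#define DRM_BO_FLAG_DISCARDABLE  (1ULL << 9)

static uint64_t apply_bo_flags(uint64_t cur, uint64_t mask, uint64_t flags)
{
	/* keep bits outside the mask, take masked bits from flags */
	return (cur & ~mask) | (flags & mask);
}

int main(void)
{
	uint64_t bo_flags = DRM_BO_FLAG_NO_MOVE;

	/* mark the buffer's contents as discardable on migration */
	bo_flags = apply_bo_flags(bo_flags,
				  DRM_BO_FLAG_DISCARDABLE,
				  DRM_BO_FLAG_DISCARDABLE);

	printf("discardable: %s\n",
	       (bo_flags & DRM_BO_FLAG_DISCARDABLE) ? "yes" : "no");
	return 0;
}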
/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
* with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
* with unsnooped PTEs instead of snooped, by using chipset-specific cache