	spin_unlock(&obj->mmo.lock);
}
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_release_mmap_gtt(obj);
-	i915_gem_object_release_mmap_offset(obj);
-}
-
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
	atomic_dec(&i915->mm.free_count);
}
+static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
+{
+	/* Skip serialisation and waking the device if known to be not used. */
+
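+	/*
+	 * userfault_count is only non-zero while a GTT mmap has faulted-in
+	 * PTEs outstanding; if it is zero there is nothing to revoke, so we
+	 * avoid the serialisation and device wakeup performed by
+	 * i915_gem_object_release_mmap_gtt().
+	 */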
+	if (obj->userfault_count)
+		i915_gem_object_release_mmap_gtt(obj);
+
+	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
+		struct i915_mmap_offset *mmo, *mn;
+
+		i915_gem_object_release_mmap_offset(obj);
+
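+		/*
+		 * Unhook each lookup node from the DRM vma offset manager
+		 * and free it; the object can no longer be found (and
+		 * faulted) through any of its mmap offsets.
+		 */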
+		rbtree_postorder_for_each_entry_safe(mmo, mn,
+						     &obj->mmo.offsets,
+						     offset) {
+			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+					      &mmo->vma_node);
+			kfree(mmo);
+		}
+		obj->mmo.offsets = RB_ROOT;
+	}
+}
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;
	llist_for_each_entry_safe(obj, on, freed, freed) {
-		struct i915_mmap_offset *mmo, *mn;
-
		trace_i915_gem_object_destroy(obj);
		if (!list_empty(&obj->vma.list)) {
			spin_unlock(&obj->vma.lock);
		}
-		i915_gem_object_release_mmap(obj);
-
-		rbtree_postorder_for_each_entry_safe(mmo, mn,
-						     &obj->mmo.offsets,
-						     offset) {
-			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
-					      &mmo->vma_node);
-			kfree(mmo);
-		}
-		obj->mmo.offsets = RB_ROOT;
+		__i915_gem_object_free_mmaps(obj);
-		GEM_BUG_ON(obj->userfault_count);
		GEM_BUG_ON(!list_empty(&obj->lut_list));
		atomic_set(&obj->mm.pages_pin_count, 0);