WARN_ON(msm_obj->active_count != 0);
if (msm_obj->dontneed)
- mark_unpurgable(msm_obj);
+ mark_unpurgeable(msm_obj);
list_del(&msm_obj->mm_list);
if (msm_obj->madv == MSM_MADV_WILLNEED) {
list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
- mark_purgable(msm_obj);
+ mark_purgeable(msm_obj);
} else {
WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
madv = " purged";
break;
case MSM_MADV_DONTNEED:
- stats->purgable.count++;
- stats->purgable.size += obj->size;
+ stats->purgeable.count++;
+ stats->purgeable.size += obj->size;
madv = " purgeable";
break;
case MSM_MADV_WILLNEED:
seq_printf(m, "Active: %4d objects, %9zu bytes\n",
stats.active.count, stats.active.size);
seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
- stats.purgable.count, stats.purgable.size);
+ stats.purgeable.count, stats.purgeable.size);
seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
stats.purged.count, stats.purged.size);
}
mutex_lock(&priv->mm_lock);
if (msm_obj->dontneed)
- mark_unpurgable(msm_obj);
+ mark_unpurgeable(msm_obj);
list_del(&msm_obj->mm_list);
mutex_unlock(&priv->mm_lock);
struct {
unsigned count;
size_t size;
- } all, active, purgable, purged;
+ } all, active, purgeable, purged;
};
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
return msm_obj->active_count;
}
-/* imported/exported objects are not purgable: */
-static inline bool is_unpurgable(struct msm_gem_object *msm_obj)
+/* imported/exported objects are not purgeable: */
+static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
return msm_obj->base.dma_buf && msm_obj->base.import_attach;
}
/*
 * An object is purgeable when userspace has marked it DONTNEED, it still
 * has backing pages (sgt != NULL), and it is not excluded by
 * is_unpurgeable() (imported/exported objects).
 */
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!is_unpurgeable(msm_obj);
}
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}
-static inline void mark_purgable(struct msm_gem_object *msm_obj)
+static inline void mark_purgeable(struct msm_gem_object *msm_obj)
{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
- if (is_unpurgable(msm_obj))
+ if (is_unpurgeable(msm_obj))
return;
if (WARN_ON(msm_obj->dontneed))
msm_obj->dontneed = true;
}
-static inline void mark_unpurgable(struct msm_gem_object *msm_obj)
+static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
- if (is_unpurgable(msm_obj))
+ if (is_unpurgeable(msm_obj))
return;
if (WARN_ON(!msm_obj->dontneed))
/*
* Now that we own a reference, we can drop mm_lock for the
* rest of the loop body, to reduce contention with the
- * retire_submit path (which could make more objects purgable)
+ * retire_submit path (which could make more objects purgeable)
*/
mutex_unlock(&priv->mm_lock);