	return size;
}
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+					bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
+		if (skip_pools)
+			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	return total_drained;
}
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, true);
+}
+
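The split matters because the flag set under skip_pools travels with the buffer: ion_buffer_destroy() ends up in the heap's free op, which can then tell shrinker-driven reclaim apart from an ordinary free. A minimal sketch of a caller under memory pressure; example_reclaim and nr_pages are hypothetical, only ion_heap_freelist_shrink() comes from this patch:

    /* Hypothetical caller, for illustration only. */
    static size_t example_reclaim(struct ion_heap *heap, size_t nr_pages)
    {
            /*
             * Each drained buffer is tagged with ION_PRIV_FLAG_SHRINKER_FREE
             * before ion_buffer_destroy() reaches heap->ops->free(), so its
             * pages bypass any pool and return to the system allocator.
             */
            return ion_heap_freelist_shrink(heap, nr_pages * PAGE_SIZE);
    }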
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;
	/*
	 * shrink the free list first, no point in zeroing the memory if we're
-	 * just going to reclaim it
+	 * just going to reclaim it. Also, skip any possible page pooling.
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-		freed = ion_heap_freelist_drain(heap, to_scan * PAGE_SIZE) /
+		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
				PAGE_SIZE;
	to_scan -= freed;
* @dev:		back pointer to the ion_device
* @heap:		back pointer to the heap the buffer came from
* @flags:		buffer specific flags
+ * @private_flags:	internal buffer specific flags
* @size:		size of the buffer
* @priv_virt:		private data to the buffer representable as
*			a void *
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
+	unsigned long private_flags;
	size_t size;
	union {
		void *priv_virt;
* @map_user map memory to userspace
*
* allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being freed must be truly freed back to the
+ * system, not put in a page pool or otherwise cached.
*/
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
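To make the contract concrete, here is a minimal sketch of a free op that honors the flag. The my_heap type, its pool field, and both helpers are hypothetical stand-ins, not symbols from this patch:

    /* Hypothetical heap free op honoring ION_PRIV_FLAG_SHRINKER_FREE. */
    static void my_heap_free(struct ion_buffer *buffer)
    {
            struct my_heap *heap = to_my_heap(buffer->heap);

            if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
                    /* Shrinker path: the pages must truly go back to
                     * the system allocator, so bypass the pool. */
                    my_heap_release_pages(heap, buffer);
                    return;
            }

            /* Ordinary free: recycle the pages through the pool. */
            my_pool_add(heap->pool, buffer);
    }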
+/**
* struct ion_heap - represents a heap in the system
* @node: rb node to put the heap on the device's tree of heaps
* @dev: back pointer to the ion_device
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ *				list, skipping any heap-specific
+ *				pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist
+ * immediately. Returns the total amount freed. The total freed may be
+ * higher than @size, since buffers are freed whole, or lower if the
+ * freelist holds less than @size in total.
+ *
+ * Unlike ion_heap_freelist_drain(), this does not put any pages back
+ * into page pools or otherwise cache them; everything must be
+ * genuinely freed back to the system. If you're freeing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+				size_t size);
+
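As the comment suggests, the expected caller is a shrinker. A condensed sketch of a scan-style callback built on the helper; apart from ion_heap_freelist_shrink() and ION_HEAP_FLAG_DEFER_FREE, the names are hypothetical:

    /* Hypothetical shrinker scan path using the new helper. */
    static unsigned long example_shrink_scan(struct ion_heap *heap,
                                             unsigned long nr_to_scan)
    {
            unsigned long freed = 0;

            /* Drain deferred frees first; the private flag keeps the
             * reclaimed pages out of any page pool. */
            if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                    freed = ion_heap_freelist_shrink(heap,
                                    nr_to_scan * PAGE_SIZE) / PAGE_SIZE;

            /* A real implementation would spend the remaining budget
             * (nr_to_scan - freed) on heap-specific pools. */
            return freed;
    }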
+/**
* ion_heap_freelist_size - returns the size of the freelist in bytes
* @heap: the heap
*/
{
	bool cached = ion_buffer_cached(buffer);
-	if (!cached) {
+	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		ion_page_pool_free(pool, page);
	} else {
	/* uncached pages come from the page pools, zero them before returning
	   for security purposes (other allocations are zeroed at alloc time) */
-	if (!cached)
+	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);
	for_each_sg(table->sgl, sg, table->nents, i)
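For reference, with the first hunk above applied, the system heap's page-release helper reads roughly as follows. This is a reconstruction for readability, not text from the diff; the else branch is abbreviated in the hunk:

    static void free_buffer_page(struct ion_system_heap *heap,
                                 struct ion_buffer *buffer,
                                 struct page *page, unsigned int order)
    {
            bool cached = ion_buffer_cached(buffer);

            if (!cached &&
                !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
                    /* Ordinary free of an uncached buffer: recycle the
                     * page through the matching pool. */
                    struct ion_page_pool *pool =
                            heap->pools[order_to_index(order)];

                    ion_page_pool_free(pool, page);
            } else {
                    /* Cached buffers never sat in a pool, and shrinker
                     * frees must bypass the pools, so hand the pages
                     * straight back to the system allocator. */
                    __free_pages(page, order);
            }
    }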