 		return ERR_PTR(PTR_ERR(table));
 	}
 	buffer->sg_table = table;
-	if (buffer->flags & ION_FLAG_CACHED)
+	if (buffer->flags & ION_FLAG_CACHED) {
 		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
 			    i) {
 			if (sg_dma_len(sg) == PAGE_SIZE)
 				continue;
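+			/* per-page dirty tracking requires every sg entry
+			 * to map exactly one page */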
 			pr_err("%s: cached mappings must have pagewise "
 			       "sg_lists\n", __func__);
-			heap->ops->unmap_dma(heap, buffer);
-			kfree(buffer);
-			return ERR_PTR(-EINVAL);
+			ret = -EINVAL;
+			goto err;
 		}
-	ret = ion_buffer_alloc_dirty(buffer);
-	if (ret) {
-		heap->ops->unmap_dma(heap, buffer);
-		heap->ops->free(buffer);
-		kfree(buffer);
-		return ERR_PTR(ret);
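+
+		/* allocate the bitmap used to track which of a cached
+		 * buffer's pages are dirty */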
+		ret = ion_buffer_alloc_dirty(buffer);
+		if (ret)
+			goto err;
 	}
 	buffer->dev = dev;
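+	/* assign dma addresses straight from the physical pages; memory
+	 * coming from the heaps is expected to already be set up for dma */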
 	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
 		sg_dma_address(sg) = sg_phys(sg);
 	ion_buffer_add(dev, buffer);
 	return buffer;
+
+err:
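+	/* common error path: undo the dma mapping, release the heap
+	 * allocation, then free the buffer metadata itself */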
+	heap->ops->unmap_dma(heap, buffer);
+	heap->ops->free(buffer);
+	kfree(buffer);
+	return ERR_PTR(ret);
 }
 
 static void ion_buffer_destroy(struct kref *kref)
 {
 	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
 	struct ion_device *dev = buffer->dev;
 
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-
 	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
 	buffer->heap->ops->free(buffer);
 	mutex_lock(&dev->lock);
 	rb_erase(&buffer->node, &dev->buffers);
 	mutex_unlock(&dev->lock);
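+	/* the dirty bitmap is only allocated for cached buffers, so it is
+	 * only freed for them */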
+	if (buffer->flags & ION_FLAG_CACHED)
+		kfree(buffer->dirty);
 	kfree(buffer);
 }