};
static int anonymous_heap_import(struct device *device, struct heap *heap,
- size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
- struct buffer *buffer)
+ size_t size, enum img_mem_attr attr, uint64_t buf_fd,
+ struct page **pages, struct buffer *buffer)
{
struct buffer_data *data;
- unsigned long cpu_addr = (unsigned long)buf_hnd;
struct sg_table *sgt;
- struct page **pages;
struct scatterlist *sgl;
int num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
int ret;
int i;
- pr_debug("%s:%d buffer %d (0x%p) cpu_addr %#lx for PID:%d\n",
+ pr_debug("%s:%d buffer %d (0x%p) for PID:%d\n",
__func__, __LINE__, buffer->id, buffer,
- cpu_addr, task_pid_nr(current));
-
- /* Check alignment */
- if (cpu_addr & (PAGE_SIZE-1)) {
- pr_err("%s wrong alignment of %#lx address!\n",
- __func__, cpu_addr);
- return -EFAULT;
- }
-
- pages = kmalloc_array(num_pages, sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- if (!pages) {
- pr_err("%s failed to allocate memory for pages\n", __func__);
- return -ENOMEM;
- }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- down_read(&current->mm->mmap_sem);
-#else
- down_read(&current->mm->mmap_lock);
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
- ret = get_user_pages(
- cpu_addr, num_pages,
- FOLL_WRITE,
- pages, NULL);
-#else
- pr_err("%s get_user_pages not supported for this kernel version\n",
- __func__);
- ret = -1;
-#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- up_read(&current->mm->mmap_sem);
-#else
- up_read(&current->mm->mmap_lock);
-#endif
- if (ret != num_pages) {
- pr_err("%s failed to get_user_pages count:%d for %#lx address\n",
- __func__, num_pages, cpu_addr);
- ret = -ENOMEM;
- goto get_user_pages_failed;
- }
+ task_pid_nr(current));
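+ /* The caller (img_mem_import) has already pinned the user pages with
+  * get_user_pages(); they arrive here through the 'pages' array. */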
sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!sgt) {
- ret = -ENOMEM;
- goto alloc_sgt_failed;
- }
+ if (!sgt)
+ return -ENOMEM;
ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
if (ret) {
struct page *page = pages[i];
sg_set_page(sgl, page, PAGE_SIZE, 0);
+ if (trace_physical_pages)
+ pr_info("%s:%d phys %#llx length %d\n",
+ __func__, __LINE__,
+ (unsigned long long)sg_phys(sgl), sgl->length);
+
/* Sanity check if physical address is
* accessible from the device PoV */
if (~dma_get_mask(device) & sg_phys(sgl)) {
ret = -ERANGE;
goto dma_mask_check_failed;
}
-
- if (trace_physical_pages)
- pr_info("%s:%d phys %#llx length %d\n",
- __func__, __LINE__,
- (unsigned long long)sg_phys(sgl), sgl->length);
}
pr_debug("%s:%d buffer %d orig_nents %d\n", __func__, __LINE__,
goto dma_mask_check_failed;
}
- kfree(pages);
+ /* Increase ref count for each page used */
+ for (i = 0; i < num_pages; i++)
+ if (pages[i])
+ get_page(pages[i]);
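+ /* The heap now holds its own reference on every page, so the caller can
+  * drop its get_user_pages() references via _img_mem_put_pages(). */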
+
return 0;
dma_mask_check_failed:
sg_free_table(sgt);
alloc_sgt_pages_failed:
kfree(sgt);
-get_user_pages_failed:
- for (i = 0; i < num_pages; i++)
- if (pages[i])
- put_page(pages[i]);
-alloc_sgt_failed:
- kfree(pages);
+
return ret;
}
};
static int dmabuf_heap_import(struct device *device, struct heap *heap,
- size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
- struct buffer *buffer)
+ size_t size, enum img_mem_attr attr, uint64_t buf_fd,
+ struct page **pages, struct buffer *buffer)
{
struct buffer_data *data;
int ret;
- int buf_fd = (int)buf_hnd;
+ int dmabuf_fd = (int)buf_fd;
- pr_debug("%s:%d buffer %d (0x%p) buf_fd %d\n", __func__, __LINE__,
- buffer->id, buffer, buf_fd);
+ pr_debug("%s:%d buffer %d (0x%p) dmabuf_fd %d\n", __func__, __LINE__,
+ buffer->id, buffer, dmabuf_fd);
data = kmalloc(sizeof(struct buffer_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->dma_buf = dma_buf_get(buf_fd);
+ data->dma_buf = dma_buf_get(dmabuf_fd);
if (IS_ERR_OR_NULL(data->dma_buf)) {
- pr_err("%s dma_buf_get fd %d\n", __func__, buf_fd);
+ pr_err("%s dma_buf_get fd %d\n", __func__, dmabuf_fd);
ret = -EINVAL;
goto dma_buf_get_failed;
}
data->attach = dma_buf_attach(data->dma_buf, device);
if (IS_ERR(data->attach)) {
- pr_err("%s dma_buf_attach fd %d\n", __func__, buf_fd);
+ pr_err("%s dma_buf_attach fd %d\n", __func__, dmabuf_fd);
ret = -EINVAL;
goto dma_buf_attach_failed;
}
data->sgt = dma_buf_map_attachment(data->attach, DMA_BIDIRECTIONAL);
if (IS_ERR(data->sgt)) {
- pr_err("%s dma_buf_map_attachment fd %d\n", __func__, buf_fd);
+ pr_err("%s dma_buf_map_attachment fd %d\n", __func__, dmabuf_fd);
ret = -EINVAL;
goto dma_buf_map_failed;
}
};
static int ion_heap_import(struct device *device, struct heap *heap,
- size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
- struct buffer *buffer)
+ size_t size, enum img_mem_attr attr, uint64_t buf_fd,
+ struct page **pages, struct buffer *buffer)
{
struct buffer_data *data;
int ret;
- int buf_fd = (int)buf_hnd;
+ int ion_buf_fd = (int)buf_fd;
- pr_debug("%s:%d buffer %d (0x%p) buf_fd %d\n", __func__, __LINE__,
- buffer->id, buffer, buf_fd);
+ pr_debug("%s:%d buffer %d (0x%p) ion_buf_fd %d\n", __func__, __LINE__,
+ buffer->id, buffer, ion_buf_fd);
data = kmalloc(sizeof(struct buffer_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = heap->priv;
- data->handle = ion_import_dma_buf(data->client, buf_fd);
+ data->handle = ion_import_dma_buf(data->client, ion_buf_fd);
if (IS_ERR_OR_NULL(data->handle)) {
- pr_err("%s ion_import_dma_buf fd %d\n", __func__, buf_fd);
+ pr_err("%s ion_import_dma_buf fd %d\n", __func__, ion_buf_fd);
ret = -EINVAL;
goto ion_import_dma_buf_failed;
}
data->sgt = ion_sg_table(data->client, data->handle);
if (IS_ERR(data->sgt)) {
- pr_err("%s ion_sg_table fd %d\n", __func__, buf_fd);
+ pr_err("%s ion_sg_table fd %d\n", __func__, ion_buf_fd);
ret = -EINVAL;
goto ion_sg_table_failed;
}
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/delay.h>
#include <img_mem_man.h>
#include <vha_drv_common.h>
static int _img_mem_import(struct device *device,
struct mem_ctx *ctx, struct heap *heap,
- size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
- struct buffer **buffer_new)
+ size_t size, enum img_mem_attr attr, uint64_t buf_fd,
+ struct page **pages, struct buffer **buffer_new)
{
struct mem_man *mem_man = &mem_man_data;
struct buffer *buffer;
}
ret = heap->ops->import(device, heap, buffer->actual_size, attr,
- buf_hnd, buffer);
+ buf_fd, pages, buffer);
if (ret) {
pr_err("%s: heap %d import failed\n", __func__, heap->id);
goto heap_import_failed;
return ret;
}
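+/* Drop the references taken by _img_mem_get_user_pages() and free the
+ * page pointer array. */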
+static void _img_mem_put_pages(size_t size, struct page **pages)
+{
+ int num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ int i;
+
+ for (i = 0; i < num_pages; i++)
+ if (pages[i])
+ put_page(pages[i]);
+ kfree(pages);
+}
+
+static int _img_mem_get_user_pages(size_t size, uint64_t cpu_ptr,
+ struct page **pages[])
+{
+ struct page **tmp_pages = NULL;
+ int num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ int ret;
+ int cnt = 0;
+
+ /* Check alignment */
+ if (cpu_ptr & (PAGE_SIZE-1)) {
+ pr_err("%s wrong alignment of %#llx address!\n",
+ __func__, cpu_ptr);
+ return -EFAULT;
+ }
+
+ tmp_pages = kmalloc_array(num_pages, sizeof(struct page *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tmp_pages) {
+ pr_err("%s failed to allocate memory for pages\n", __func__);
+ return -ENOMEM;
+ }
+
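+ /* mmap_sem was replaced by the mmap_lock helpers in kernel 5.8, hence the
+  * version-dependent locking around get_user_pages(). */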
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+ down_read(&current->mm->mmap_sem);
+#else
+ mmap_read_lock(current->mm);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ ret = get_user_pages(
+ cpu_ptr, num_pages,
+ 0,
+ tmp_pages, NULL);
+#else
+ pr_err("%s get_user_pages not supported for this kernel version\n",
+ __func__);
+ ret = -1;
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+ up_read(&current->mm->mmap_sem);
+#else
+ mmap_read_unlock(current->mm);
+#endif
+ if (ret != num_pages) {
+ pr_err("%s failed to get_user_pages count:%d for %#llx address\n",
+ __func__, num_pages, cpu_ptr);
+ ret = -ENOMEM;
+ goto out_get_user_pages;
+ }
+
+ *pages = tmp_pages;
+
+ return 0;
+
+out_get_user_pages:
+ _img_mem_put_pages(size, tmp_pages);
+
+ return ret;
+}
+
int img_mem_import(struct device *device, struct mem_ctx *ctx, int heap_id,
- size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
- int *buf_id)
+ size_t size, enum img_mem_attr attr, uint64_t buf_fd,
+ uint64_t cpu_ptr, int *buf_id)
{
struct mem_man *mem_man = &mem_man_data;
struct heap *heap;
struct buffer *buffer;
+ struct page **pages = NULL;
int ret;
- pr_debug("%s heap %d ctx %p hnd %#llx\n", __func__, heap_id, ctx, buf_hnd);
+ pr_debug("%s heap %d ctx %p hnd %#llx\n", __func__, heap_id, ctx, buf_fd);
+
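+ /* A non-zero cpu_ptr means anonymous user memory is being imported:
+  * pin its pages up front, before taking mem_man->mutex. */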
+ if (cpu_ptr) {
+ ret = _img_mem_get_user_pages(size, cpu_ptr, &pages);
+ if (ret) {
+ pr_err("%s:%d getting user pages failed\n", __func__, __LINE__);
+ return ret;
+ }
+ }
ret = mutex_lock_interruptible(&mem_man->mutex);
- if (ret)
- return ret;
+ if (ret) {
+ pr_err("%s:%d lock interrupted: mem_man->mutex\n", __func__, __LINE__);
+
+ goto lock_interrupted;
+ }
heap = idr_find(&mem_man->heaps, heap_id);
if (!heap) {
pr_err("%s: heap id %d not found\n", __func__, heap_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto idr_find_failed;
}
- ret = _img_mem_import(device, ctx, heap, size, attr, buf_hnd, &buffer);
- if (ret) {
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
+ ret = _img_mem_import(device, ctx, heap, size, attr, buf_fd, pages, &buffer);
+ if (ret)
+ goto mem_import_failed;
*buf_id = buffer->id;
mutex_unlock(&mem_man->mutex);
- pr_debug("%s buf_hnd %#llx heap %d (%s) buffer %d size %zu\n", __func__,
- buf_hnd, heap_id, get_heap_name(heap->type), *buf_id, size);
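+ /* The heap's import has taken its own page references (get_page), so the
+  * pinning references from get_user_pages() can be dropped now. */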
+ if (cpu_ptr)
+ _img_mem_put_pages(size, pages);
+
+ pr_debug("%s buf_fd %#llx heap %d (%s) buffer %d size %zu\n", __func__,
+ buf_fd, heap_id, get_heap_name(heap->type), *buf_id, size);
pr_debug("%s heap %d ctx %p created buffer %d (%p) size %zu\n",
__func__, heap_id, ctx, *buf_id, buffer, size);
+
+ return 0;
+
+mem_import_failed:
+idr_find_failed:
+ mutex_unlock(&mem_man->mutex);
+lock_interrupted:
+ if (cpu_ptr)
+ _img_mem_put_pages(size, pages);
+
return ret;
}
EXPORT_SYMBOL(img_mem_import);
if (ret) {
pr_err("%s: heap %d buffer %d no sg_table!\n",
__func__, heap->id, buffer->id);
+ mutex_unlock(&mem_man->mutex);
return -1;
}
sgl = sgt->sgl;
if (!sgl) {
pr_err("%s: heap %d buffer %d wrong offset %d!\n",
__func__, heap->id, buffer->id, offset);
+ mutex_unlock(&mem_man->mutex);
return -1;
}
if (ret) {
pr_err("%s: heap %d buffer %d no page array!\n",
__func__, heap->id, buffer->id);
+ mutex_unlock(&mem_man->mutex);
return -1;
}
if (offset > buffer->actual_size) {
pr_err("%s: heap %d buffer %d wrong offset %d!\n",
__func__, heap->id, buffer->id, offset);
+ mutex_unlock(&mem_man->mutex);
return -1;
}
addr = addrs[page_idx];
size_t size, enum img_mem_attr attr,
struct buffer *buffer);
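+ /* The new 'pages' argument carries user pages pinned by img_mem_import();
+  * heaps that do not import anonymous memory can ignore it. */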
int (*import)(struct device *device, struct heap *heap,
- size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
- struct buffer *buffer);
+ size_t size, enum img_mem_attr attr, uint64_t buf_fd,
+ struct page **pages, struct buffer *buffer);
int (*export)(struct device *device, struct heap *heap,
size_t size, enum img_mem_attr attr, struct buffer *buffer,
uint64_t *buf_hnd);
int img_mem_alloc(struct device *device, struct mem_ctx *ctx, int heap_id,
size_t size, enum img_mem_attr attributes, int *buf_id);
int img_mem_import(struct device *device, struct mem_ctx *ctx, int heap_id,
- size_t size, enum img_mem_attr attributes, uint64_t buf_hnd,
- int *buf_id);
+ size_t size, enum img_mem_attr attributes, uint64_t buf_fd,
+ uint64_t cpu_ptr, int *buf_id);
int img_mem_export(struct device *device, struct mem_ctx *ctx, int buf_id,
size_t size, enum img_mem_attr attributes, uint64_t *buf_hnd);
void img_mem_free(struct mem_ctx *ctx, int buf_id);
/* parameters to import a device buffer */
struct vha_import_data {
uint64_t size; /* [IN] Size of device memory (in bytes) */
- uint64_t buf_hnd; /* [IN] File descriptor/cpu pointer
- of buffer to import */
+ uint64_t buf_fd; /* [IN] File descriptor */
+ uint64_t cpu_ptr; /* [IN] CPU pointer of buffer to import */
uint32_t heap_id; /* [IN] Heap ID of allocator */
uint32_t attributes; /* [IN] Attributes of buffer */
char name[8]; /* [IN] short name for buffer */
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/reset.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/version.h>
} irq_poll_timer;
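+/* Per-device private data passed to vha_add_dev(); currently holds the
+ * optional reset controller handle. */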
+struct vha_prvdata {
+ struct reset_control *rst;
+};
+
static ssize_t info_show(struct device_driver *drv, char *buf);
static irqreturn_t dt_plat_thread_irq(int irq, void *dev_id)
struct resource res;
void __iomem *reg_addr;
uint32_t reg_size, core_size;
+ struct vha_prvdata *data;
char info[256];
info_show(ofdev->dev.driver, info);
goto out_add_dev;
}
+ data = devm_kzalloc(&ofdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&ofdev->dev, "vha private data allocate error\n");
+ ret = -ENOMEM;
+ goto out_add_dev;
+ }
+
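+ /* The reset line is optional and may be shared; when the device tree does
+  * not describe one, devm_reset_control_get_optional_shared() returns NULL
+  * rather than an error. */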
+ data->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
+ if (IS_ERR(data->rst)) {
+ ret = PTR_ERR(data->rst);
+ return ret;
+ }
+
/* no 'per device' memory heaps used */
ret = vha_add_dev(&ofdev->dev, NULL, 0,
- NULL /* plat priv data */, reg_addr, core_size);
+ data, reg_addr, core_size);
if (ret) {
dev_err(&ofdev->dev, "failed to intialize driver core!\n");
goto out_add_dev;
if (copy_from_user(&data, buf, sizeof(data)))
return -EFAULT;
- dev_dbg(miscdev->this_device, "%s: session %u, buf_hnd 0x%016llx, size %llu, heap_id %u\n",
- __func__, session->id, data.buf_hnd, data.size, data.heap_id);
+ dev_dbg(miscdev->this_device, "%s: session %u, buf_fd 0x%016llx, size %llu, heap_id %u\n",
+ __func__, session->id, data.buf_fd, data.size, data.heap_id);
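+ /* buf_fd is used by the dma-buf/ION import paths, while a non-zero cpu_ptr
+  * makes img_mem_import() pin the user pages needed by the anonymous heap. */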
ret = img_mem_import(session->vha->dev, session->mem_ctx, data.heap_id,
- (size_t)data.size, data.attributes, data.buf_hnd,
- &data.buf_id);
+ (size_t)data.size, data.attributes, data.buf_fd,
+ data.cpu_ptr, &data.buf_id);
if (ret)
return ret;