--- /dev/null
+/*\r
+ * Copyright (C) 2010 Intel Corporation\r
+ *\r
+ * Author: Ian Molton <ian.molton@collabora.co.uk>\r
+ *\r
+ * This program is free software; you can redistribute it and/or modify\r
+ * it under the terms of the GNU General Public License as published by\r
+ * the Free Software Foundation; version 2 of the License.\r
+ *\r
+ * This program is distributed in the hope that it will be useful,\r
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+ * GNU General Public License for more details.\r
+ *\r
+ * You should have received a copy of the GNU General Public License\r
+ * along with this program; if not, write to the Free Software\r
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r
+ */\r
+\r
+#include <linux/kernel.h>\r
+#include <linux/module.h>\r
+#include <linux/fs.h>\r
+#include <linux/dma-mapping.h>\r
+#include <linux/sched.h>\r
+#include <linux/slab.h>\r
+#include <linux/miscdevice.h>\r
+#include <linux/virtio.h>\r
+#include <linux/virtio_ids.h>\r
+#include <linux/virtio_config.h>\r
+\r
+#define VIRTIO_ID_GL 6\r
+#define DEVICE_NAME "glmem"\r
+\r
+/* Define to use debugging checksums on transfers */\r
+#undef DEBUG_GLIO\r
+\r
/* Per-open-file state: one mmap()able command buffer shared with the host. */
struct virtio_gl_data {
	char *buffer;		/* vmalloc_user() area mmap()ed by userspace */
	int pages;		/* size of @buffer in pages */
	unsigned int pid;	/* pid of the opening task, reported to the host */
};
+\r
/*
 * Header userspace places at the start of the shared buffer.  This is a
 * wire format shared with the host, hence __packed.
 */
struct virtio_gl_header {
	int pid;		/* client pid, overwritten by put_data() */
	int buf_size;		/* bytes to send to the host (header included) */
	int r_buf_size;		/* bytes expected back from the host */
#ifdef DEBUG_GLIO
	int sum;		/* debug checksum over the payload */
#endif
	char buffer;		/* first byte of the variable-length payload */
} __packed;
+\r
+#define to_virtio_gl_data(a) ((struct virtio_gl_data *)(a)->private_data)\r
+\r
+#ifdef DEBUG_GLIO\r
+#define SIZE_OUT_HEADER (sizeof(int)*4)\r
+#define SIZE_IN_HEADER (sizeof(int)*2)\r
+#else\r
+#define SIZE_OUT_HEADER (sizeof(int)*3)\r
+#define SIZE_IN_HEADER sizeof(int)\r
+#endif\r
+\r
+static struct virtqueue *vq;\r
+\r
+\r
+/* This is videobuf_vmalloc_to_sg() from videobuf-dma-sg.c with\r
+ * some modifications\r
+ */\r
+static struct scatterlist *vmalloc_to_sg(struct scatterlist *sg_list,\r
+ unsigned char *virt, unsigned int pages)\r
+{\r
+ struct page *pg;\r
+\r
+ /* unaligned */\r
+ BUG_ON((ulong)virt & ~PAGE_MASK);\r
+\r
+ /* Fill with elements for the data */\r
+ while (pages) {\r
+ pg = vmalloc_to_page(virt);\r
+ if (!pg)\r
+ goto err;\r
+\r
+ sg_set_page(sg_list, pg, PAGE_SIZE, 0);\r
+ virt += PAGE_SIZE;\r
+ sg_list++;\r
+ pages--;\r
+ }\r
+\r
+ return sg_list;\r
+\r
+err:\r
+ kfree(sg_list);\r
+ return NULL;\r
+}\r
+\r
+static int put_data(struct virtio_gl_data *gldata)\r
+{\r
+ struct scatterlist *sg, *sg_list;\r
+ unsigned int count, ret, o_page, i_page, sg_entries;\r
+ struct virtio_gl_header *header =\r
+ (struct virtio_gl_header *)gldata->buffer;\r
+\r
+ ret = header->buf_size;\r
+\r
+ o_page = (header->buf_size + PAGE_SIZE-1) >> PAGE_SHIFT;\r
+ i_page = (header->r_buf_size + PAGE_SIZE-1) >> PAGE_SHIFT;\r
+\r
+ header->pid = gldata->pid;\r
+\r
+ if ((o_page && i_page) &&\r
+ (o_page > gldata->pages || i_page > gldata->pages)) {\r
+ i_page = 0;\r
+ }\r
+\r
+ if (o_page > gldata->pages)\r
+ o_page = gldata->pages;\r
+\r
+ if (i_page > gldata->pages)\r
+ i_page = gldata->pages;\r
+\r
+ if (!o_page)\r
+ o_page = 1;\r
+\r
+ sg_entries = o_page + i_page;\r
+\r
+ sg_list = kcalloc(sg_entries, sizeof(struct scatterlist), GFP_KERNEL);\r
+\r
+ if (!sg_list) {\r
+ ret = -EIO;\r
+ goto out;\r
+ }\r
+\r
+ sg_init_table(sg_list, sg_entries);\r
+\r
+ sg = vmalloc_to_sg(sg_list, gldata->buffer, o_page);\r
+ sg = vmalloc_to_sg(sg, gldata->buffer, i_page);\r
+\r
+ if (!sg) {\r
+ ret = -EIO;\r
+ goto out_free;\r
+ }\r
+\r
+ /* Transfer data */\r
+ if (vq->vq_ops->add_buf(vq, sg_list, o_page, i_page, (void *)1) >= 0) {\r
+ vq->vq_ops->kick(vq);\r
+ /* Chill out until it's done with the buffer. */\r
+ while (!vq->vq_ops->get_buf(vq, &count))\r
+ cpu_relax();\r
+ }\r
+\r
+out_free:\r
+ kfree(sg_list);\r
+out:\r
+ return ret;\r
+}\r
+\r
+static void free_buffer(struct virtio_gl_data *gldata)\r
+{\r
+ if (gldata->buffer) {\r
+ vfree(gldata->buffer);\r
+ gldata->buffer = NULL;\r
+ }\r
+}\r
+\r
+static int glmem_open(struct inode *inode, struct file *file)\r
+{\r
+ struct virtio_gl_data *gldata = kzalloc(sizeof(struct virtio_gl_data),\r
+ GFP_KERNEL);\r
+\r
+ if (!gldata)\r
+ return -ENXIO;\r
+\r
+ gldata->pid = pid_nr(task_pid(current));\r
+\r
+ file->private_data = gldata;\r
+\r
+ return 0;\r
+}\r
+\r
+static int glmem_mmap(struct file *filp, struct vm_area_struct *vma)\r
+{\r
+ struct virtio_gl_data *gldata = to_virtio_gl_data(filp);\r
+ int pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;\r
+\r
+ /* Set a reasonable limit */\r
+ if (pages > 16)\r
+ return -ENOMEM;\r
+\r
+ /* for now, just allow one buffer to be mmap()ed. */\r
+ if (gldata->buffer)\r
+ return -EIO;\r
+\r
+ gldata->buffer = vmalloc_user(pages*PAGE_SIZE);\r
+\r
+ if (!gldata->buffer)\r
+ return -ENOMEM;\r
+\r
+ gldata->pages = pages;\r
+\r
+ if (remap_vmalloc_range(vma, gldata->buffer, 0) < 0) {\r
+ vfree(gldata->buffer);\r
+ return -EIO;\r
+ }\r
+\r
+ vma->vm_flags |= VM_DONTEXPAND;\r
+\r
+ return 0;\r
+}\r
+\r
/*
 * fsync() is (ab)used as the "submit buffer to host" trigger.
 *
 * Fix vs. the original: put_data()'s return value was silently
 * discarded, so userspace never learned about transfer failures.
 * Propagate negative errnos; a non-negative result means success.
 */
static int glmem_fsync(struct file *filp, int datasync)
{
	struct virtio_gl_data *gldata = to_virtio_gl_data(filp);
	int ret = put_data(gldata);

	return ret < 0 ? ret : 0;
}
+\r
+static int glmem_release(struct inode *inode, struct file *file)\r
+{\r
+ struct virtio_gl_data *gldata = to_virtio_gl_data(file);\r
+\r
+ if (gldata && gldata->buffer) {\r
+ struct virtio_gl_header *header =\r
+ (struct virtio_gl_header *)gldata->buffer;\r
+\r
+ /* Make sure the host hears about the process ending / dying */\r
+ header->pid = gldata->pid;\r
+ header->buf_size = SIZE_OUT_HEADER + 2;\r
+ header->r_buf_size = SIZE_IN_HEADER;\r
+ *(short *)(&header->buffer) = -1;\r
+\r
+ put_data(gldata);\r
+ free_buffer(gldata);\r
+ }\r
+\r
+ kfree(gldata);\r
+\r
+ return 0;\r
+}\r
+\r
/* File operations for the /dev/glmem misc device; fsync() doubles as
 * the "submit buffer to host" trigger. */
static const struct file_operations glmem_fops = {
	.owner = THIS_MODULE,
	.open = glmem_open,
	.mmap = glmem_mmap,
	.fsync = glmem_fsync,
	.release = glmem_release,
};
+\r
+static struct miscdevice glmem_dev = {\r
+ MISC_DYNAMIC_MINOR,\r
+ DEVICE_NAME,\r
+ &glmem_fops\r
+};\r
+\r
+static int glmem_probe(struct virtio_device *vdev)\r
+{\r
+ int ret;\r
+\r
+ /* We expect a single virtqueue. */\r
+ vq = virtio_find_single_vq(vdev, NULL, "output");\r
+ if (IS_ERR(vq))\r
+ return PTR_ERR(vq);\r
+\r
+ ret = misc_register(&glmem_dev);\r
+ if (ret) {\r
+ printk(KERN_ERR "glmem: cannot register glmem_dev as misc");\r
+ return -ENODEV;\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
/*
 * remove(): quiesce the device first so the host stops touching the
 * virtqueue, then unpublish the char device, then free the vq.
 */
static void __devexit glmem_remove(struct virtio_device *vdev)
{
	vdev->config->reset(vdev);
	misc_deregister(&glmem_dev);
	vdev->config->del_vqs(vdev);
}
+\r
/* Match any device advertising the (locally defined) GL virtio ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_GL, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
+\r
/* Virtio bus glue binding the GL device ID to probe/remove above. */
static struct virtio_driver virtio_gl_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.id_table = id_table,
	.probe = glmem_probe,
	.remove = __devexit_p(glmem_remove),
};
+\r
/* Module entry point: register with the virtio bus. */
static int __init glmem_init(void)
{
	return register_virtio_driver(&virtio_gl_driver);
}
+\r
/* Module exit point: unregister from the virtio bus. */
static void __exit glmem_exit(void)
{
	unregister_virtio_driver(&virtio_gl_driver);
}
+\r
+module_init(glmem_init);\r
+module_exit(glmem_exit);\r
+\r
+MODULE_DEVICE_TABLE(virtio, id_table);\r
+MODULE_DESCRIPTION("Virtio gl passthrough driver");\r
+MODULE_LICENSE("GPL v2");\r
+\r