source "drivers/gpu/drm/udl/Kconfig"
+source "drivers/gpu/drm/vigs/Kconfig"
+
source "drivers/gpu/drm/ast/Kconfig"
source "drivers/gpu/drm/mgag200/Kconfig"
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_VIGS) += vigs/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
--- /dev/null
+#
+# VIGS configuration
+#
+
+config DRM_VIGS
+ tristate "DRM Support for VIGS"
+ depends on DRM && PCI
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ help
+ This module enables VIGS passthrough from emulated system
+ to hypervisor (for example, QEMU).
+
+config DRM_VIGS_DEBUG
+ bool "VIGS debug messages"
+ depends on DRM_VIGS
+ # 'n' is the valid Kconfig bool value ("no" is not a Kconfig literal)
+ default n
+ help
+ Enable VIGS debug messages.
--- /dev/null
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+# Pick up DRM headers plus the driver-local include directory.
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/vigs
+# All objects linked into the vigs_drm.ko module.
+vigs_drm-y := main.o \
+ vigs_driver.o \
+ vigs_gem.o \
+ vigs_device.o \
+ vigs_mman.o \
+ vigs_buffer.o \
+ vigs_crtc.o \
+ vigs_output.o \
+ vigs_framebuffer.o \
+ vigs_fbdev.o
+
+obj-$(CONFIG_DRM_VIGS) += vigs_drm.o
--- /dev/null
+#include "vigs_driver.h"
+#include <linux/module.h>
+#include <linux/init.h>
+
+MODULE_AUTHOR("Stanislav Vorobiov");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Module entry point: register the VIGS PCI/DRM driver.
+ * Returns 0 on success or the negative error code from registration;
+ * the original 'if (ret != 0) return ret; return 0;' dance was redundant.
+ */
+int vigs_init(void)
+{
+ return vigs_driver_register();
+}
+
+/* Module exit point: unregister the VIGS PCI/DRM driver. */
+void vigs_cleanup(void)
+{
+ vigs_driver_unregister();
+}
+
+module_init(vigs_init);
+module_exit(vigs_cleanup);
--- /dev/null
+#include "vigs_buffer.h"
+#include "vigs_mman.h"
+#include <drm/vigs_drm.h>
+#include <ttm/ttm_placement.h>
+
+/*
+ * kref release callback: called when the last vigs-level reference
+ * (see vigs_buffer_release) is dropped. Unmaps the kernel mapping
+ * first, then drops the TTM reference, which in turn invokes
+ * vigs_buffer_base_destroy below.
+ */
+static void vigs_buffer_destroy(struct kref *kref)
+{
+ struct vigs_buffer_object *vigs_bo = kref_to_vigs_buffer(kref);
+ struct ttm_buffer_object *bo = &(vigs_bo->base);
+
+ /* Must happen before the TTM object goes away (see vigs_buffer.h). */
+ vigs_buffer_kunmap(vigs_bo);
+
+ DRM_DEBUG_DRIVER("buffer destroyed (dom = %u, off = %lu, sz = %lu)\n",
+ vigs_bo->domain,
+ vigs_buffer_offset(vigs_bo),
+ vigs_buffer_accounted_size(vigs_bo));
+
+ ttm_bo_unref(&bo);
+}
+
+/* TTM destroy callback: frees the containing vigs_buffer_object. */
+static void vigs_buffer_base_destroy(struct ttm_buffer_object *bo)
+{
+ struct vigs_buffer_object *vigs_bo = bo_to_vigs_buffer(bo);
+
+ kfree(vigs_bo);
+}
+
+/*
+ * Create a TTM-backed buffer object.
+ * 'size' must be non-zero; 'domain' must be DRM_VIGS_GEM_DOMAIN_VRAM
+ * or DRM_VIGS_GEM_DOMAIN_RAM (anything else is -EINVAL).
+ * On success '*vigs_bo' holds a buffer with a kref count of 1;
+ * on failure '*vigs_bo' is NULL.
+ */
+int vigs_buffer_create(struct vigs_mman *mman,
+ unsigned long size,
+ bool kernel,
+ u32 domain,
+ struct vigs_buffer_object **vigs_bo)
+{
+ u32 placements[1];
+ struct ttm_placement placement;
+ enum ttm_bo_type type;
+ int ret = 0;
+
+ if (size == 0) {
+ return -EINVAL;
+ }
+
+ *vigs_bo = NULL;
+
+ /*
+ * Single placement, pinned (NO_EVICT): VRAM maps to TTM's VRAM
+ * domain, RAM maps to the driver-private PRIV0 domain.
+ */
+ if (domain == DRM_VIGS_GEM_DOMAIN_VRAM) {
+ placements[0] =
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_NO_EVICT;
+ } else if (domain == DRM_VIGS_GEM_DOMAIN_RAM) {
+ placements[0] =
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | TTM_PL_FLAG_NO_EVICT;
+ } else {
+ return -EINVAL;
+ }
+
+ memset(&placement, 0, sizeof(placement));
+
+ placement.placement = placements;
+ placement.busy_placement = placements;
+ placement.num_placement = 1;
+ placement.num_busy_placement = 1;
+
+ /* Kernel-only buffers are not mappable from user space. */
+ if (kernel) {
+ type = ttm_bo_type_kernel;
+ } else {
+ type = ttm_bo_type_device;
+ }
+
+ *vigs_bo = kzalloc(sizeof(**vigs_bo), GFP_KERNEL);
+
+ if (!*vigs_bo) {
+ return -ENOMEM;
+ }
+
+ ret = ttm_bo_init(&mman->bo_dev, &(*vigs_bo)->base, size, type,
+ &placement, 0, 0,
+ false, NULL, size,
+ &vigs_buffer_base_destroy);
+
+ if (ret != 0) {
+ /*
+ * '*vigs_bo' is freed by 'ttm_bo_init'
+ */
+ *vigs_bo = NULL;
+ return ret;
+ }
+
+ (*vigs_bo)->domain = domain;
+
+ kref_init(&(*vigs_bo)->kref);
+
+ DRM_DEBUG_DRIVER("buffer created (dom = %u, off = %lu, sz = %lu)\n",
+ (*vigs_bo)->domain,
+ vigs_buffer_offset(*vigs_bo),
+ vigs_buffer_accounted_size(*vigs_bo));
+
+ return 0;
+}
+
+/* Take an extra reference; NULL is tolerated for caller convenience. */
+void vigs_buffer_acquire(struct vigs_buffer_object *vigs_bo)
+{
+ if (vigs_bo) {
+ kref_get(&vigs_bo->kref);
+ }
+}
+
+/* Drop a reference; vigs_buffer_destroy runs when the count hits 0. */
+void vigs_buffer_release(struct vigs_buffer_object *vigs_bo)
+{
+ if (vigs_bo) {
+ kref_put(&vigs_bo->kref, vigs_buffer_destroy);
+ }
+}
+
+/*
+ * Map the whole buffer into kernel address space.
+ * Idempotent: returns 0 immediately if already mapped.
+ * On success 'vigs_bo->kptr' points at the mapping.
+ */
+int vigs_buffer_kmap(struct vigs_buffer_object *vigs_bo)
+{
+ bool is_iomem;
+ int ret;
+
+ if (vigs_bo->kptr) {
+ return 0;
+ }
+
+ ret = ttm_bo_kmap(&vigs_bo->base,
+ 0,
+ vigs_bo->base.num_pages,
+ &vigs_bo->kmap);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ /* 'is_iomem' result is intentionally ignored here. */
+ vigs_bo->kptr = ttm_kmap_obj_virtual(&vigs_bo->kmap, &is_iomem);
+
+ DRM_DEBUG_DRIVER("buffer (dom = %u, off = %lu, sz = %lu) mapped to %p\n",
+ vigs_bo->domain,
+ vigs_buffer_offset(vigs_bo),
+ vigs_buffer_accounted_size(vigs_bo),
+ vigs_bo->kptr);
+
+ return 0;
+}
+
+/* Undo vigs_buffer_kmap; no-op when the buffer isn't mapped. */
+void vigs_buffer_kunmap(struct vigs_buffer_object *vigs_bo)
+{
+ if (vigs_bo->kptr == NULL) {
+ return;
+ }
+
+ vigs_bo->kptr = NULL;
+
+ ttm_bo_kunmap(&vigs_bo->kmap);
+
+ DRM_DEBUG_DRIVER("buffer (dom = %u, off = %lu, sz = %lu) unmapped\n",
+ vigs_bo->domain,
+ vigs_buffer_offset(vigs_bo),
+ vigs_buffer_accounted_size(vigs_bo));
+}
--- /dev/null
+#ifndef _VIGS_BUFFER_H_
+#define _VIGS_BUFFER_H_
+
+#include "drmP.h"
+#include <ttm/ttm_bo_driver.h>
+
+struct vigs_mman;
+
+/*
+ * VIGS buffer: a TTM buffer object plus driver-level refcounting
+ * and an optional kernel mapping.
+ */
+struct vigs_buffer_object
+{
+ struct ttm_buffer_object base;
+
+ /* DRM_VIGS_GEM_DOMAIN_VRAM or DRM_VIGS_GEM_DOMAIN_RAM. */
+ u32 domain;
+
+ /*
+ * ttm_buffer_object::destroy isn't good enough for us because
+ * we want to 'vigs_buffer_kunmap' before object destruction and
+ * it's too late for that in ttm_buffer_object::destroy.
+ */
+ struct kref kref;
+
+ /*
+ * Valid only after successful call to 'vigs_buffer_kmap'.
+ * @{
+ */
+
+ struct ttm_bo_kmap_obj kmap;
+ void *kptr; /* Kernel pointer to buffer data. */
+
+ /*
+ * @}
+ */
+};
+
+/* Convert from embedded TTM object back to the VIGS buffer. */
+static inline struct vigs_buffer_object *bo_to_vigs_buffer(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vigs_buffer_object, base);
+}
+
+/* Convert from embedded kref back to the VIGS buffer. */
+static inline struct vigs_buffer_object *kref_to_vigs_buffer(struct kref *kref)
+{
+ return container_of(kref, struct vigs_buffer_object, kref);
+}
+
+/*
+ * when 'kernel' is true the buffer will be accessible from
+ * kernel only.
+ * 'domain' must be either VRAM or RAM. CPU domain is not supported.
+ */
+int vigs_buffer_create(struct vigs_mman *mman,
+ unsigned long size,
+ bool kernel,
+ u32 domain,
+ struct vigs_buffer_object **vigs_bo);
+
+/*
+ * Page aligned buffer size.
+ */
+static inline unsigned long vigs_buffer_size(struct vigs_buffer_object *vigs_bo)
+{
+ return vigs_bo->base.num_pages << PAGE_SHIFT;
+}
+
+/*
+ * Actual size that was passed to 'vigs_buffer_create'.
+ */
+static inline unsigned long vigs_buffer_accounted_size(struct vigs_buffer_object *vigs_bo)
+{
+ return vigs_bo->base.acc_size;
+}
+
+/*
+ * Buffer offset relative to 0.
+ */
+static inline unsigned long vigs_buffer_offset(struct vigs_buffer_object *vigs_bo)
+{
+ return vigs_bo->base.offset;
+}
+
+/*
+ * Buffer offset relative to DRM_FILE_OFFSET. For kernel buffers it's always 0.
+ */
+static inline u64 vigs_buffer_mmap_offset(struct vigs_buffer_object *vigs_bo)
+{
+ return vigs_bo->base.addr_space_offset;
+}
+
+/* Reserve the underlying TTM object; failure here is considered fatal. */
+static inline void vigs_buffer_reserve(struct vigs_buffer_object *vigs_bo)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&vigs_bo->base, false, false, false, 0);
+
+ BUG_ON(ret != 0);
+}
+
+static inline void vigs_buffer_unreserve(struct vigs_buffer_object *vigs_bo)
+{
+ ttm_bo_unreserve(&vigs_bo->base);
+}
+
+/*
+ * Functions below MUST NOT be called between
+ * vigs_buffer_reserve/vigs_buffer_unreserve.
+ * @{
+ */
+
+/*
+ * Increments ref count.
+ * Passing NULL won't hurt, this is for convenience.
+ */
+void vigs_buffer_acquire(struct vigs_buffer_object *vigs_bo);
+
+/*
+ * Decrements ref count; the buffer is destroyed when the count drops to 0.
+ * (The caller's pointer itself is NOT set to NULL - it is passed by value.)
+ * Passing NULL won't hurt, this is for convenience.
+ */
+void vigs_buffer_release(struct vigs_buffer_object *vigs_bo);
+
+/*
+ * @}
+ */
+
+/*
+ * Functions below MUST be called between
+ * vigs_buffer_reserve/vigs_buffer_unreserve if simultaneous access
+ * from different threads is possible.
+ * @{
+ */
+
+int vigs_buffer_kmap(struct vigs_buffer_object *vigs_bo);
+
+void vigs_buffer_kunmap(struct vigs_buffer_object *vigs_bo);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_comm.h"
+#include "vigs_device.h"
+#include "vigs_gem.h"
+#include "vigs_buffer.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * Lay out a command in the shared command GEM:
+ * [request_header][request payload][response_header][response payload].
+ * (Re)allocates the command GEM if it is missing or too small.
+ * On success '*request' / '*response' (when non-NULL) point at the
+ * payload areas inside the kernel mapping.
+ */
+static int vigs_comm_prepare(struct vigs_comm *comm,
+ vigsp_cmd cmd,
+ unsigned long request_size,
+ unsigned long response_size,
+ void **request,
+ void **response)
+{
+ int ret;
+ void *ptr;
+ struct vigsp_cmd_request_header *request_header;
+ unsigned long total_size = sizeof(struct vigsp_cmd_request_header) +
+ request_size +
+ sizeof(struct vigsp_cmd_response_header) +
+ response_size;
+
+ if (!comm->cmd_gem || (vigs_buffer_size(comm->cmd_gem->bo) < total_size)) {
+ /* Current command GEM too small (or absent) - replace it. */
+ if (comm->cmd_gem) {
+ drm_gem_object_unreference_unlocked(&comm->cmd_gem->base);
+ comm->cmd_gem = NULL;
+ }
+
+ ret = vigs_gem_create(comm->vigs_dev,
+ total_size,
+ true,
+ DRM_VIGS_GEM_DOMAIN_RAM,
+ &comm->cmd_gem);
+
+ if (ret != 0) {
+ DRM_ERROR("unable to create command GEM\n");
+ return ret;
+ }
+
+ ret = vigs_buffer_kmap(comm->cmd_gem->bo);
+
+ if (ret != 0) {
+ DRM_ERROR("unable to kmap command GEM\n");
+
+ drm_gem_object_unreference_unlocked(&comm->cmd_gem->base);
+ comm->cmd_gem = NULL;
+
+ return ret;
+ }
+ }
+
+ ptr = comm->cmd_gem->bo->kptr;
+
+ memset(ptr, 0, vigs_buffer_size(comm->cmd_gem->bo));
+
+ request_header = ptr;
+
+ request_header->cmd = cmd;
+ /* Response header starts 'request_size' bytes after the request header. */
+ request_header->response_offset = request_size;
+
+ if (request) {
+ *request = (request_header + 1);
+ }
+
+ if (response) {
+ /* Skip request payload and the response header. */
+ *response = (void*)(request_header + 1) +
+ request_size +
+ sizeof(struct vigsp_cmd_response_header);
+ }
+
+ return 0;
+}
+
+/*
+ * Hand the prepared command over to the host by writing the command
+ * buffer's RAM offset to the device register, then translate the
+ * host-written status into an errno.
+ * NOTE(review): this relies on the host completing the command
+ * synchronously within the register write - confirm with the
+ * hypervisor-side implementation.
+ */
+static int vigs_comm_exec(struct vigs_comm *comm)
+{
+ struct vigsp_cmd_request_header *request_header = comm->cmd_gem->bo->kptr;
+ struct vigsp_cmd_response_header *response_header =
+ (void*)(request_header + 1) + request_header->response_offset;
+
+ /*
+ * 'writel' already has the mem barrier, so it's ok to just access the
+ * response data afterwards.
+ */
+
+ writel(vigs_buffer_offset(comm->cmd_gem->bo),
+ VIGS_USER_PTR(comm->io_ptr, 0) + VIGS_REG_RAM_OFFSET);
+
+ switch (response_header->status) {
+ case vigsp_status_success:
+ return 0;
+ case vigsp_status_bad_call:
+ DRM_ERROR("bad host call\n");
+ return -EINVAL;
+ case vigsp_status_exec_error:
+ DRM_ERROR("host exec error\n");
+ return -EIO;
+ default:
+ DRM_ERROR("fatal host error\n");
+ return -ENXIO;
+ }
+}
+
+/*
+ * Handshake with the host: send our protocol version and verify
+ * the host speaks the same one. -ENODEV on version mismatch.
+ */
+static int vigs_comm_init(struct vigs_comm *comm)
+{
+ int ret;
+ struct vigsp_cmd_init_request *request;
+ struct vigsp_cmd_init_response *response;
+
+ ret = vigs_comm_prepare(comm,
+ vigsp_cmd_init,
+ sizeof(*request),
+ sizeof(*response),
+ (void**)&request,
+ (void**)&response);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ request->client_version = VIGS_PROTOCOL_VERSION;
+
+ ret = vigs_comm_exec(comm);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ if (response->server_version != VIGS_PROTOCOL_VERSION) {
+ DRM_ERROR("protocol version mismatch, expected %u, actual %u\n",
+ VIGS_PROTOCOL_VERSION,
+ response->server_version);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Tell the host we're going away; errors are deliberately ignored. */
+static void vigs_comm_exit(struct vigs_comm *comm)
+{
+ int ret;
+
+ ret = vigs_comm_prepare(comm, vigsp_cmd_exit, 0, 0, NULL, NULL);
+
+ if (ret != 0) {
+ return;
+ }
+
+ vigs_comm_exec(comm);
+}
+
+/*
+ * Allocate the communicator, perform the host handshake and claim
+ * user slot #0 for the kernel itself. On failure '*comm' is NULL.
+ */
+int vigs_comm_create(struct vigs_device *vigs_dev,
+ struct vigs_comm **comm)
+{
+ int ret = 0;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ *comm = kzalloc(sizeof(**comm), GFP_KERNEL);
+
+ if (!*comm) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ (*comm)->vigs_dev = vigs_dev;
+ (*comm)->io_ptr = vigs_dev->io_map->handle;
+
+ ret = vigs_comm_init(*comm);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ /*
+ * We're always guaranteed that 'user_map' has at least one element
+ * and we should use it, just stuff in 'this' pointer in order
+ * not to lose this slot.
+ */
+ vigs_dev->user_map[0] = (struct drm_file*)(*comm);
+
+ return 0;
+
+fail2:
+ if ((*comm)->cmd_gem) {
+ drm_gem_object_unreference_unlocked(&(*comm)->cmd_gem->base);
+ }
+ kfree(*comm);
+fail1:
+ *comm = NULL;
+
+ return ret;
+}
+
+/* Tear down the communicator: notify host, free slot #0 and the command GEM. */
+void vigs_comm_destroy(struct vigs_comm *comm)
+{
+ DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_comm_exit(comm);
+ comm->vigs_dev->user_map[0] = NULL;
+ if (comm->cmd_gem) {
+ drm_gem_object_unreference_unlocked(&comm->cmd_gem->base);
+ }
+ kfree(comm);
+}
+
+/* Ask the host to reset its VIGS state (used on DRM lastclose). */
+int vigs_comm_reset(struct vigs_comm *comm)
+{
+ int ret;
+
+ ret = vigs_comm_prepare(comm, vigsp_cmd_reset, 0, 0, NULL, NULL);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ return vigs_comm_exec(comm);
+}
+
+/*
+ * Create a host-side surface backed by 'sfc_gem' (its VRAM offset is
+ * passed to the host). The host-assigned surface id is returned via
+ * 'id' when non-NULL.
+ */
+int vigs_comm_create_surface(struct vigs_comm *comm,
+ unsigned int width,
+ unsigned int height,
+ unsigned int stride,
+ vigsp_surface_format format,
+ struct vigs_gem_object *sfc_gem,
+ vigsp_surface_id *id)
+{
+ int ret;
+ struct vigsp_cmd_create_surface_request *request;
+ struct vigsp_cmd_create_surface_response *response;
+
+ DRM_DEBUG_DRIVER("width = %u, height = %u, stride = %u, fmt = %d\n",
+ width,
+ height,
+ stride,
+ format);
+
+ ret = vigs_comm_prepare(comm,
+ vigsp_cmd_create_surface,
+ sizeof(*request),
+ sizeof(*response),
+ (void**)&request,
+ (void**)&response);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ request->width = width;
+ request->height = height;
+ request->stride = stride;
+ request->format = format;
+ request->vram_offset = vigs_buffer_offset(sfc_gem->bo);
+
+ ret = vigs_comm_exec(comm);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ DRM_DEBUG_DRIVER("created = %u\n", response->id);
+
+ if (id) {
+ *id = response->id;
+ }
+
+ return 0;
+}
+
+/* Destroy the host-side surface identified by 'id'. */
+int vigs_comm_destroy_surface(struct vigs_comm *comm, vigsp_surface_id id)
+{
+ int ret;
+ struct vigsp_cmd_destroy_surface_request *request;
+
+ DRM_DEBUG_DRIVER("id = %u\n", id);
+
+ ret = vigs_comm_prepare(comm,
+ vigsp_cmd_destroy_surface,
+ sizeof(*request),
+ 0,
+ (void**)&request,
+ NULL);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ request->id = id;
+
+ return vigs_comm_exec(comm);
+}
+
+/*
+ * Make surface 'id' the host's scanout (root) surface;
+ * id 0 detaches the current root surface (see vigs_crtc_disable).
+ */
+int vigs_comm_set_root_surface(struct vigs_comm *comm, vigsp_surface_id id)
+{
+ int ret;
+ struct vigsp_cmd_set_root_surface_request *request;
+
+ DRM_DEBUG_DRIVER("id = %u\n", id);
+
+ ret = vigs_comm_prepare(comm,
+ vigsp_cmd_set_root_surface,
+ sizeof(*request),
+ 0,
+ (void**)&request,
+ NULL);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ request->id = id;
+
+ return vigs_comm_exec(comm);
+}
+
+/* IOCTL: report the protocol version compiled into this driver. */
+int vigs_comm_get_protocol_version_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_get_protocol_version *args = data;
+
+ args->version = VIGS_PROTOCOL_VERSION;
+
+ return 0;
+}
--- /dev/null
+#ifndef _VIGS_COMM_H_
+#define _VIGS_COMM_H_
+
+#include <linux/types.h>
+#include "vigs_protocol.h"
+
+struct drm_device;
+struct drm_file;
+struct vigs_device;
+struct vigs_gem_object;
+
+/*
+ * Kernel-to-host communication channel. Commands are marshalled into
+ * 'cmd_gem' and executed via a register write (see vigs_comm.c).
+ */
+struct vigs_comm
+{
+ struct vigs_device *vigs_dev;
+
+ /*
+ * From vigs_device::io_map::handle for speed.
+ */
+ void __iomem *io_ptr;
+
+ /* Reusable command buffer; grown on demand, may be NULL initially. */
+ struct vigs_gem_object *cmd_gem;
+};
+
+int vigs_comm_create(struct vigs_device *vigs_dev,
+ struct vigs_comm **comm);
+
+void vigs_comm_destroy(struct vigs_comm *comm);
+
+int vigs_comm_reset(struct vigs_comm *comm);
+
+int vigs_comm_create_surface(struct vigs_comm *comm,
+ unsigned int width,
+ unsigned int height,
+ unsigned int stride,
+ vigsp_surface_format format,
+ struct vigs_gem_object *sfc_gem,
+ vigsp_surface_id *id);
+
+int vigs_comm_destroy_surface(struct vigs_comm *comm, vigsp_surface_id id);
+
+int vigs_comm_set_root_surface(struct vigs_comm *comm, vigsp_surface_id id);
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_comm_get_protocol_version_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_crtc.h"
+#include "vigs_device.h"
+#include "vigs_framebuffer.h"
+#include "vigs_comm.h"
+#include "drm_crtc_helper.h"
+
+/* The single CRTC exposed by this driver; no extra state beyond the base. */
+struct vigs_crtc
+{
+ struct drm_crtc base;
+};
+
+/* Convert from embedded drm_crtc back to the VIGS CRTC. */
+static inline struct vigs_crtc *crtc_to_vigs_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct vigs_crtc, base);
+}
+
+/* drm_crtc_funcs::destroy - clean up DRM state and free the CRTC. */
+static void vigs_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct vigs_crtc *vigs_crtc = crtc_to_vigs_crtc(crtc);
+
+ /* "\n" added: every other debug message in this driver terminates
+ * the line; without it the log line stays open until the next print. */
+ DRM_DEBUG_KMS("enter\n");
+
+ drm_crtc_cleanup(crtc);
+
+ kfree(vigs_crtc);
+}
+
+/* DPMS is not supported by the virtual device; stub that only logs. */
+static void vigs_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ DRM_DEBUG_KMS("enter: mode = %d\n", mode);
+}
+
+/* No mode fixup needed - accept every mode as-is. */
+static bool vigs_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ return true;
+}
+
+/*
+ * A framebuffer has been attached to the CRTC: tell the host which
+ * surface is now the root (scanout) surface.
+ * Returns -EINVAL when no framebuffer is bound, otherwise the
+ * result of the host call.
+ */
+static int vigs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct vigs_device *vigs_dev = crtc->dev->dev_private;
+ struct vigs_framebuffer *vigs_fb;
+
+ DRM_DEBUG_KMS("enter: x = %d, y = %d\n", x, y);
+
+ if (!crtc->fb) {
+ DRM_ERROR("crtc->fb is NULL\n");
+ return -EINVAL;
+ }
+
+ vigs_fb = fb_to_vigs_fb(crtc->fb);
+
+ /* Redundant 'if (ret) return ret; return 0;' tail collapsed. */
+ return vigs_comm_set_root_surface(vigs_dev->comm, vigs_fb->sfc_id);
+}
+
+/* Full mode set degenerates to a base set - the host handles sizing. */
+static int vigs_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ DRM_DEBUG_KMS("enter: x = %d, y = %d\n", x, y);
+
+ return vigs_crtc_mode_set_base(crtc, x, y, old_fb);
+}
+
+/* Nothing to do before/after a mode set on the virtual device. */
+static void vigs_crtc_prepare(struct drm_crtc *crtc)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+static void vigs_crtc_commit(struct drm_crtc *crtc)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+/* No hardware LUT on the virtual device. */
+static void vigs_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void vigs_crtc_disable(struct drm_crtc *crtc)
+{
+ struct vigs_device *vigs_dev = crtc->dev->dev_private;
+
+ /*
+ * Framebuffer has been detached, notify the host that
+ * root surface is gone.
+ */
+
+ DRM_DEBUG_KMS("enter\n");
+
+ if (!crtc->fb) {
+ /*
+ * No current framebuffer, no need to notify the host.
+ */
+
+ return;
+ }
+
+ /* Surface id 0 means "no root surface"; result intentionally ignored. */
+ vigs_comm_set_root_surface(vigs_dev->comm, 0);
+}
+
+/* CRTC ops: only set_config and destroy; no page flip / cursor support. */
+static const struct drm_crtc_funcs vigs_crtc_funcs =
+{
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = vigs_crtc_destroy,
+};
+
+/* Helper ops wired to the (mostly stub) implementations above. */
+static const struct drm_crtc_helper_funcs vigs_crtc_helper_funcs =
+{
+ .dpms = vigs_crtc_dpms,
+ .mode_fixup = vigs_crtc_mode_fixup,
+ .mode_set = vigs_crtc_mode_set,
+ .mode_set_base = vigs_crtc_mode_set_base,
+ .prepare = vigs_crtc_prepare,
+ .commit = vigs_crtc_commit,
+ .load_lut = vigs_crtc_load_lut,
+ .disable = vigs_crtc_disable,
+};
+
+/*
+ * Allocate and register the single CRTC with DRM.
+ * The CRTC is freed via vigs_crtc_destroy on drm_mode_config_cleanup.
+ */
+int vigs_crtc_init(struct vigs_device *vigs_dev)
+{
+ struct vigs_crtc *vigs_crtc;
+ int ret;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ vigs_crtc = kzalloc(sizeof(*vigs_crtc), GFP_KERNEL);
+
+ if (!vigs_crtc) {
+ return -ENOMEM;
+ }
+
+ ret = drm_crtc_init(vigs_dev->drm_dev,
+ &vigs_crtc->base,
+ &vigs_crtc_funcs);
+
+ if (ret != 0) {
+ kfree(vigs_crtc);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&vigs_crtc->base, &vigs_crtc_helper_funcs);
+
+ return 0;
+}
--- /dev/null
+#ifndef _VIGS_CRTC_H_
+#define _VIGS_CRTC_H_
+
+#include "drmP.h"
+
+struct vigs_device;
+
+/* Register the driver's single CRTC with DRM. */
+int vigs_crtc_init(struct vigs_device *vigs_dev);
+
+#endif
--- /dev/null
+#include "vigs_device.h"
+#include "vigs_mman.h"
+#include "vigs_crtc.h"
+#include "vigs_output.h"
+#include "vigs_framebuffer.h"
+#include "vigs_comm.h"
+#include "vigs_fbdev.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * Bring up the whole device: map PCI bars, create the memory manager,
+ * the user slot table, the host communicator, and KMS objects.
+ * Cleanup on failure strictly unwinds in reverse creation order
+ * via the failN labels.
+ */
+int vigs_device_init(struct vigs_device *vigs_dev,
+ struct drm_device *drm_dev,
+ struct pci_dev *pci_dev,
+ unsigned long flags)
+{
+ int ret;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_dev->dev = &pci_dev->dev;
+ vigs_dev->drm_dev = drm_dev;
+ vigs_dev->pci_dev = pci_dev;
+
+ /* BAR 0 = VRAM, BAR 1 = RAM, BAR 2 = per-user register windows. */
+ vigs_dev->vram_base = pci_resource_start(pci_dev, 0);
+ vigs_dev->vram_size = pci_resource_len(pci_dev, 0);
+
+ vigs_dev->ram_base = pci_resource_start(pci_dev, 1);
+ vigs_dev->ram_size = pci_resource_len(pci_dev, 1);
+
+ vigs_dev->io_base = pci_resource_start(pci_dev, 2);
+ vigs_dev->io_size = pci_resource_len(pci_dev, 2);
+
+ if (!vigs_dev->vram_base || !vigs_dev->ram_base || !vigs_dev->io_base) {
+ DRM_ERROR("VRAM, RAM or IO bar not found on device\n");
+ ret = -ENODEV;
+ goto fail1;
+ }
+
+ if ((vigs_dev->io_size < VIGS_REGS_SIZE) ||
+ ((vigs_dev->io_size % VIGS_REGS_SIZE) != 0)) {
+ /*
+ * resource_size_t may be 64-bit; '%u' was a format mismatch,
+ * so cast explicitly for portability.
+ */
+ DRM_ERROR("IO bar has bad size: %llu bytes\n",
+ (unsigned long long)vigs_dev->io_size);
+ ret = -ENODEV;
+ goto fail1;
+ }
+
+ ret = drm_addmap(vigs_dev->drm_dev,
+ vigs_dev->io_base,
+ vigs_dev->io_size,
+ _DRM_REGISTERS,
+ 0,
+ &vigs_dev->io_map);
+ if (ret != 0) {
+ goto fail1;
+ }
+
+ ret = vigs_mman_create(vigs_dev->vram_base, vigs_dev->vram_size,
+ vigs_dev->ram_base, vigs_dev->ram_size,
+ &vigs_dev->mman);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ /* One register window (and thus one user slot) per VIGS_REGS_SIZE. */
+ vigs_dev->user_map_length = (vigs_dev->io_size / VIGS_REGS_SIZE);
+
+ vigs_dev->user_map =
+ kzalloc((sizeof(*vigs_dev->user_map) * vigs_dev->user_map_length),
+ GFP_KERNEL);
+
+ if (!vigs_dev->user_map) {
+ ret = -ENOMEM;
+ goto fail3;
+ }
+
+ mutex_init(&vigs_dev->user_mutex);
+
+ ret = vigs_comm_create(vigs_dev, &vigs_dev->comm);
+
+ if (ret != 0) {
+ goto fail4;
+ }
+
+ drm_mode_config_init(vigs_dev->drm_dev);
+
+ vigs_framebuffer_config_init(vigs_dev);
+
+ ret = vigs_crtc_init(vigs_dev);
+
+ if (ret != 0) {
+ goto fail5;
+ }
+
+ ret = vigs_output_init(vigs_dev);
+
+ if (ret != 0) {
+ goto fail5;
+ }
+
+ ret = vigs_fbdev_create(vigs_dev, &vigs_dev->fbdev);
+
+ if (ret != 0) {
+ goto fail5;
+ }
+
+ return 0;
+
+fail5:
+ drm_mode_config_cleanup(vigs_dev->drm_dev);
+ vigs_comm_destroy(vigs_dev->comm);
+fail4:
+ kfree(vigs_dev->user_map);
+fail3:
+ vigs_mman_destroy(vigs_dev->mman);
+fail2:
+ drm_rmmap(vigs_dev->drm_dev, vigs_dev->io_map);
+fail1:
+
+ return ret;
+}
+
+/* Tear everything down in reverse of vigs_device_init's creation order. */
+void vigs_device_cleanup(struct vigs_device *vigs_dev)
+{
+ DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_fbdev_destroy(vigs_dev->fbdev);
+ drm_mode_config_cleanup(vigs_dev->drm_dev);
+ vigs_comm_destroy(vigs_dev->comm);
+ kfree(vigs_dev->user_map);
+ vigs_mman_destroy(vigs_dev->mman);
+ drm_rmmap(vigs_dev->drm_dev, vigs_dev->io_map);
+}
+
+/* DRM file_operations::mmap - delegate to the memory manager. */
+int vigs_device_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct vigs_device *vigs_dev = file_priv->minor->dev->dev_private;
+
+ if (vigs_dev == NULL) {
+ DRM_ERROR("no device\n");
+ return -EINVAL;
+ }
+
+ return vigs_mman_mmap(vigs_dev->mman, filp, vma);
+}
+
+/*
+ * IOCTL: claim a free user slot (register window) for this DRM file.
+ * Returns -ENOSPC if all slots are taken; slot index is handed back
+ * via args->index.
+ */
+int vigs_device_user_enter_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_user_enter *args = data;
+ int i;
+ int index = -1;
+
+ mutex_lock(&vigs_dev->user_mutex);
+
+ for (i = 0; i < vigs_dev->user_map_length; ++i) {
+ if (!vigs_dev->user_map[i]) {
+ index = i;
+ vigs_dev->user_map[i] = file_priv;
+ break;
+ }
+ }
+
+ if (index == -1) {
+ DRM_ERROR("no more free user slots\n");
+ mutex_unlock(&vigs_dev->user_mutex);
+ return -ENOSPC;
+ }
+
+#if defined(__i386__) || defined(__x86_64__)
+ /*
+ * Write CR registers.
+ * Presumably this lets the host inspect guest control/page-table
+ * state - TODO confirm against the hypervisor-side implementation.
+ * @{
+ */
+
+ writel(read_cr0(), VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR0);
+ writel(0, VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR1);
+ writel(read_cr2(), VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR2);
+ writel(read_cr3(), VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR3);
+ writel(read_cr4(), VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR4);
+
+ /*
+ * @}
+ */
+#endif
+
+ mutex_unlock(&vigs_dev->user_mutex);
+
+ args->index = index;
+
+ DRM_DEBUG_DRIVER("user %u entered\n", args->index);
+
+ return 0;
+}
+
+/*
+ * IOCTL: release a user slot previously claimed via user_enter.
+ * The slot must belong to the calling DRM file.
+ */
+int vigs_device_user_leave_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_user_leave *args = data;
+
+ /* Length is fixed after init, so this check is safe outside the lock. */
+ if (args->index >= vigs_dev->user_map_length) {
+ DRM_ERROR("invalid index: %u\n", args->index);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vigs_dev->user_mutex);
+
+ if (vigs_dev->user_map[args->index] != file_priv) {
+ DRM_ERROR("user doesn't own index %u\n", args->index);
+ mutex_unlock(&vigs_dev->user_mutex);
+ return -EINVAL;
+ }
+
+ vigs_dev->user_map[args->index] = NULL;
+
+ mutex_unlock(&vigs_dev->user_mutex);
+
+ DRM_DEBUG_DRIVER("user %u left\n", args->index);
+
+ return 0;
+}
+
+/* Release every slot owned by 'file_priv' (called on file close). */
+void vigs_device_user_leave_all(struct vigs_device *vigs_dev,
+ struct drm_file *file_priv)
+{
+ int i;
+
+ mutex_lock(&vigs_dev->user_mutex);
+
+ for (i = 0; i < vigs_dev->user_map_length; ++i) {
+ if (vigs_dev->user_map[i] == file_priv) {
+ vigs_dev->user_map[i] = NULL;
+ DRM_DEBUG_DRIVER("user %d left\n", i);
+ }
+ }
+
+ mutex_unlock(&vigs_dev->user_mutex);
+}
--- /dev/null
+#ifndef _VIGS_DEVICE_H_
+#define _VIGS_DEVICE_H_
+
+#include "drmP.h"
+
+struct vigs_mman;
+struct vigs_comm;
+struct vigs_fbdev;
+
+/*
+ * Register offsets within one per-user register window.
+ * Each window is VIGS_REGS_SIZE bytes; see VIGS_USER_PTR below.
+ */
+#define VIGS_REG_RAM_OFFSET 0
+#define VIGS_REG_CR0 8
+#define VIGS_REG_CR1 16
+#define VIGS_REG_CR2 24
+#define VIGS_REG_CR3 32
+#define VIGS_REG_CR4 40
+#define VIGS_REGS_SIZE 64
+
+/* Base of user slot 'index' inside the mapped IO bar. */
+#define VIGS_USER_PTR(io_ptr, index) ((io_ptr) + ((index) * VIGS_REGS_SIZE))
+
+struct vigs_device
+{
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct pci_dev *pci_dev;
+
+ /* PCI BAR 0. */
+ resource_size_t vram_base;
+ resource_size_t vram_size;
+
+ /* PCI BAR 1. */
+ resource_size_t ram_base;
+ resource_size_t ram_size;
+
+ /* PCI BAR 2. */
+ resource_size_t io_base;
+ resource_size_t io_size;
+
+ /* Map of IO BAR. */
+ drm_local_map_t *io_map;
+
+ struct vigs_mman *mman;
+
+ /* slot contains DRM file pointer if user is active, NULL if slot can be used. */
+ struct drm_file **user_map;
+
+ /* Length of 'user_map'. Must be at least 1. */
+ int user_map_length;
+
+ /* Mutex used to serialize access to user_map. */
+ struct mutex user_mutex;
+
+ /* Communicator instance for kernel itself, takes slot #0 in user_map. */
+ struct vigs_comm *comm;
+
+ struct vigs_fbdev *fbdev;
+};
+
+int vigs_device_init(struct vigs_device *vigs_dev,
+ struct drm_device *drm_dev,
+ struct pci_dev *pci_dev,
+ unsigned long flags);
+
+void vigs_device_cleanup(struct vigs_device *vigs_dev);
+
+int vigs_device_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_device_user_enter_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_device_user_leave_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+void vigs_device_user_leave_all(struct vigs_device *vigs_dev,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_driver.h"
+#include "vigs_gem.h"
+#include "vigs_device.h"
+#include "vigs_fbdev.h"
+#include "vigs_comm.h"
+#include "vigs_framebuffer.h"
+#include "drmP.h"
+#include "drm.h"
+#include <linux/module.h>
+#include <drm/vigs_drm.h>
+
+#define PCI_VENDOR_ID_VIGS 0x19B2
+#define PCI_DEVICE_ID_VIGS 0x1011
+
+#define DRIVER_NAME "vigs"
+#define DRIVER_DESC "VIGS DRM"
+#define DRIVER_DATE "20121102"
+#define DRIVER_MAJOR DRM_VIGS_DRIVER_VERSION
+#define DRIVER_MINOR 0
+
+/* Match the single VIGS virtual PCI device exposed by the hypervisor. */
+static struct pci_device_id vigs_pci_table[] __devinitdata =
+{
+ {
+ .vendor = PCI_VENDOR_ID_VIGS,
+ .device = PCI_DEVICE_ID_VIGS,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, vigs_pci_table);
+
+/* Driver-private IOCTLs; all require DRM authentication. */
+static struct drm_ioctl_desc vigs_drm_ioctls[] =
+{
+ DRM_IOCTL_DEF_DRV(VIGS_GET_PROTOCOL_VERSION, vigs_comm_get_protocol_version_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_GEM_CREATE, vigs_gem_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_GEM_MMAP, vigs_gem_mmap_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_GEM_INFO, vigs_gem_info_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_USER_ENTER, vigs_device_user_enter_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_USER_LEAVE, vigs_device_user_leave_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_FB_INFO, vigs_framebuffer_info_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+};
+
+/* Standard DRM fops except mmap, which routes through the TTM manager. */
+static const struct file_operations vigs_drm_driver_fops =
+{
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .mmap = vigs_device_mmap,
+};
+
+/*
+ * drm_driver::load - allocate the per-device structure and bring the
+ * device up. On failure 'dev->dev_private' is reset to NULL so later
+ * callbacks don't see a dangling pointer to the freed structure.
+ */
+static int vigs_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ int ret = 0;
+ struct vigs_device *vigs_dev = NULL;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_dev = kzalloc(sizeof(*vigs_dev), GFP_KERNEL);
+
+ if (vigs_dev == NULL) {
+ DRM_ERROR("failed to allocate VIGS device\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = vigs_dev;
+
+ ret = vigs_device_init(vigs_dev, dev, dev->pdev, flags);
+
+ if (ret != 0) {
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ /* Don't leave a dangling pointer behind after the kfree below. */
+ dev->dev_private = NULL;
+ kfree(vigs_dev);
+
+ return ret;
+}
+
+/* drm_driver::unload - undo vigs_drm_load. */
+static int vigs_drm_unload(struct drm_device *dev)
+{
+ struct vigs_device *vigs_dev = dev->dev_private;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_device_cleanup(vigs_dev);
+
+ kfree(dev->dev_private);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+/* drm_driver::postclose - release all user slots held by this file. */
+static void vigs_drm_postclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = dev->dev_private;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_device_user_leave_all(vigs_dev, file_priv);
+}
+
+/* drm_driver::lastclose - restore fbdev mode and reset host state. */
+static void vigs_drm_lastclose(struct drm_device *dev)
+{
+ struct vigs_device *vigs_dev = dev->dev_private;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ if (vigs_dev->fbdev) {
+ vigs_fbdev_restore_mode(vigs_dev->fbdev);
+ }
+
+ vigs_comm_reset(vigs_dev->comm);
+}
+
+/* GEM + KMS driver; GEM/dumb callbacks live in vigs_gem.c. */
+static struct drm_driver vigs_drm_driver =
+{
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
+ .load = vigs_drm_load,
+ .unload = vigs_drm_unload,
+ .postclose = vigs_drm_postclose,
+ .lastclose = vigs_drm_lastclose,
+ .gem_init_object = vigs_gem_init_object,
+ .gem_free_object = vigs_gem_free_object,
+ .gem_open_object = vigs_gem_open_object,
+ .gem_close_object = vigs_gem_close_object,
+ .dumb_create = vigs_gem_dumb_create,
+ .dumb_map_offset = vigs_gem_dumb_map_offset,
+ .dumb_destroy = vigs_gem_dumb_destroy,
+ .ioctls = vigs_drm_ioctls,
+ .num_ioctls = DRM_ARRAY_SIZE(vigs_drm_ioctls),
+ .fops = &vigs_drm_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+/* PCI probe: hand the device to the DRM core. */
+static int __devinit vigs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &vigs_drm_driver);
+}
+
+static void vigs_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static struct pci_driver vigs_pci_driver =
+{
+ .name = DRIVER_NAME,
+ .id_table = vigs_pci_table,
+ .probe = vigs_pci_probe,
+ .remove = __devexit_p(vigs_pci_remove),
+};
+
+/* Called from module init/exit in main.c. */
+int vigs_driver_register(void)
+{
+ return drm_pci_init(&vigs_drm_driver, &vigs_pci_driver);
+}
+
+void vigs_driver_unregister(void)
+{
+ drm_pci_exit(&vigs_drm_driver, &vigs_pci_driver);
+}
--- /dev/null
+#ifndef _VIGS_DRIVER_H_
+#define _VIGS_DRIVER_H_
+
+#include <linux/types.h>
+
+/* Register/unregister the VIGS PCI/DRM driver (module entry points). */
+int vigs_driver_register(void);
+
+void vigs_driver_unregister(void);
+
+#endif
--- /dev/null
+#include "vigs_fbdev.h"
+#include "vigs_device.h"
+#include "vigs_gem.h"
+#include "vigs_buffer.h"
+#include "vigs_framebuffer.h"
+#include "vigs_output.h"
+#include "drm_crtc_helper.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * From drm_fb_helper.c, modified to work with 'regno' > 16.
+ * @{
+ */
+
+/*
+ * Set one color register. Based on drm_fb_helper's setcolreg, extended
+ * to handle 'regno' > 16 for the non-truecolor path.
+ * Fix: the truecolor pseudo-palette has exactly 16 entries, so the
+ * bound must be 'regno < 16' - the original 'regno <= 16' wrote one
+ * element past the end of fbi->pseudo_palette.
+ */
+static int vigs_fbdev_setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, u16 regno, struct fb_info *fbi)
+{
+ struct drm_fb_helper *fb_helper = fbi->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ int pindex;
+
+ if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 *palette;
+ u32 value;
+ /* place color in pseudopalette (16 entries) */
+ if (regno < 16) {
+ palette = (u32*)fbi->pseudo_palette;
+ red >>= (16 - fbi->var.red.length);
+ green >>= (16 - fbi->var.green.length);
+ blue >>= (16 - fbi->var.blue.length);
+ value = (red << fbi->var.red.offset) |
+ (green << fbi->var.green.offset) |
+ (blue << fbi->var.blue.offset);
+ if (fbi->var.transp.length > 0) {
+ u32 mask = (1 << fbi->var.transp.length) - 1;
+ mask <<= fbi->var.transp.offset;
+ value |= mask;
+ }
+ palette[regno] = value;
+ }
+ return 0;
+ }
+
+ pindex = regno;
+
+ if (fb->bits_per_pixel == 16) {
+ pindex = regno << 3;
+
+ if ((fb->depth == 16) && (regno > 63)) {
+ return -EINVAL;
+ }
+
+ if ((fb->depth == 15) && (regno > 31)) {
+ return -EINVAL;
+ }
+
+ if (fb->depth == 16) {
+ u16 r, g, b;
+ int i;
+
+ if (regno < 32) {
+ for (i = 0; i < 8; i++) {
+ fb_helper->funcs->gamma_set(crtc, red,
+ green, blue, pindex + i);
+ }
+ }
+
+ fb_helper->funcs->gamma_get(crtc, &r,
+ &g, &b,
+ (pindex >> 1));
+
+ for (i = 0; i < 4; i++) {
+ fb_helper->funcs->gamma_set(crtc, r,
+ green, b,
+ (pindex >> 1) + i);
+ }
+ }
+ }
+
+ if (fb->depth != 16) {
+ fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
+ }
+
+ return 0;
+}
+
+/*
+ * fb_setcmap hook: for every CRTC managed by this fb helper, program each
+ * cmap entry through vigs_fbdev_setcolreg(), then reload the CRTC's LUT.
+ */
+static int vigs_fbdev_setcmap(struct fb_cmap *cmap, struct fb_info *fbi)
+{
+ struct drm_fb_helper *fb_helper = fbi->par;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ u16 *red, *green, *blue, *transp;
+ struct drm_crtc *crtc;
+ int i, j, ret = 0;
+ int start;
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
+
+ /* Restart at the cmap base for each CRTC. */
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ for (j = 0; j < cmap->len; j++) {
+ u16 hred, hgreen, hblue, htransp = 0xffff;
+
+ hred = *red++;
+ hgreen = *green++;
+ hblue = *blue++;
+
+ /* transp is optional in an fb_cmap; default is opaque. */
+ if (transp) {
+ htransp = *transp++;
+ }
+
+ ret = vigs_fbdev_setcolreg(crtc, hred, hgreen, hblue, start++, fbi);
+
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ crtc_funcs->load_lut(crtc);
+ }
+
+ return ret;
+}
+
+/*
+ * @}
+ */
+
+/* fb_set_par hook: defer to the generic DRM fb helper implementation. */
+int vigs_fbdev_set_par(struct fb_info *fbi)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ return drm_fb_helper_set_par(fbi);
+}
+
+/* fbdev entry points; cfb_* draw with the CPU into the mapped VRAM. */
+static struct fb_ops vigs_fbdev_ops =
+{
+ .owner = THIS_MODULE,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = vigs_fbdev_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = vigs_fbdev_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+/*
+ * One-time fbdev setup: allocate a VRAM GEM sized for the requested mode,
+ * wrap it in a vigs framebuffer, kmap it and fill in the fb_info so the
+ * fbdev console can draw into it.
+ *
+ * NOTE(review): on the fail3/fail4 paths the vigs_fb framebuffer created
+ * above (which holds a reference on fb_gem) does not appear to be
+ * destroyed -- only fbi is released.  Verify against the framebuffer
+ * destroy path.
+ */
+static int vigs_fbdev_probe_once(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct vigs_device *vigs_dev = helper->dev->dev_private;
+ struct vigs_gem_object *fb_gem;
+ struct vigs_framebuffer *vigs_fb;
+ struct fb_info *fbi;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ unsigned long offset;
+ int dpi;
+ int ret;
+ struct drm_connector *connector;
+
+ DRM_DEBUG_KMS("%dx%dx%d\n",
+ sizes->surface_width,
+ sizes->surface_height,
+ sizes->surface_bpp);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ fbi = framebuffer_alloc(0, &vigs_dev->pci_dev->dev);
+
+ if (!fbi) {
+ DRM_ERROR("failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ ret = vigs_gem_create(vigs_dev,
+ (mode_cmd.pitches[0] * mode_cmd.height),
+ false,
+ DRM_VIGS_GEM_DOMAIN_VRAM,
+ &fb_gem);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ret = vigs_framebuffer_create(vigs_dev,
+ &mode_cmd,
+ fb_gem,
+ &vigs_fb);
+
+ /* vigs_framebuffer_create took its own reference on success. */
+ drm_gem_object_unreference_unlocked(&fb_gem->base);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ helper->fb = &vigs_fb->base;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ fbi->fbops = &vigs_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+
+ if (ret != 0) {
+ DRM_ERROR("failed to allocate cmap\n");
+ goto fail3;
+ }
+
+ /* Kernel-virtual mapping so the cfb_* drawing ops can write pixels. */
+ ret = vigs_buffer_kmap(fb_gem->bo);
+
+ if (ret != 0) {
+ DRM_ERROR("unable to kmap framebuffer GEM\n");
+ goto fail4;
+ }
+
+ strcpy(fbi->fix.id, "VIGS");
+
+ drm_fb_helper_fill_fix(fbi, vigs_fb->base.pitches[0], vigs_fb->base.depth);
+ drm_fb_helper_fill_var(fbi, helper, vigs_fb->base.width, vigs_fb->base.height);
+
+ /*
+ * Setup DPI.
+ * @{
+ */
+
+ dpi = vigs_output_get_dpi();
+ fbi->var.height = vigs_output_get_phys_height(dpi, fbi->var.yres);
+ fbi->var.width = vigs_output_get_phys_width(dpi, fbi->var.xres);
+
+ /*
+ * Walk all connectors and set display_info.
+ */
+
+ list_for_each_entry(connector, &vigs_dev->drm_dev->mode_config.connector_list, head) {
+ connector->display_info.width_mm = fbi->var.width;
+ connector->display_info.height_mm = fbi->var.height;
+ }
+
+ /*
+ * @}
+ */
+
+ /*
+ * TODO: Play around with xoffset/yoffset, make sure this code works.
+ */
+
+ offset = fbi->var.xoffset * (vigs_fb->base.bits_per_pixel >> 3);
+ offset += fbi->var.yoffset * vigs_fb->base.pitches[0];
+
+ /*
+ * TODO: "vram_base + ..." - not nice, make a function for this.
+ */
+ fbi->fix.smem_start = vigs_dev->vram_base +
+ vigs_buffer_offset(fb_gem->bo) +
+ offset;
+ fbi->screen_base = fb_gem->bo->kptr + offset;
+ fbi->screen_size = fbi->fix.smem_len = vigs_fb->base.width *
+ vigs_fb->base.height *
+ (vigs_fb->base.bits_per_pixel >> 3);
+
+ return 0;
+
+fail4:
+ fb_dealloc_cmap(&fbi->cmap);
+fail3:
+ helper->fb = NULL;
+ helper->fbdev = NULL;
+fail2:
+ framebuffer_release(fbi);
+fail1:
+
+ return ret;
+}
+
+/*
+ * fb_probe hook.  Returns 1 when a new fb_info was set up (so the helper
+ * registers it), 0 when the existing one is reused, negative on error.
+ */
+static int vigs_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ /*
+ * helper->fb == NULL means this function is called for the first
+ * time; afterwards the existing helper->fb is reused as clone mode.
+ */
+
+ if (helper->fb) {
+ return 0;
+ }
+
+ ret = vigs_fbdev_probe_once(helper, sizes);
+
+ return (ret < 0) ? ret : 1;
+}
+
+/* fb helper callbacks: only fb_probe is needed. */
+static struct drm_fb_helper_funcs vigs_fbdev_funcs =
+{
+ .fb_probe = vigs_fbdev_probe,
+};
+
+/*
+ * Allocate and initialize the fbdev emulation layer: one fb helper with a
+ * single CRTC/connector slot, then kick off the initial 32bpp config.
+ * On success *vigs_fbdev owns the helper; on failure *vigs_fbdev is NULL.
+ *
+ * NOTE(review): the return values of
+ * drm_fb_helper_single_add_all_connectors() and
+ * drm_fb_helper_initial_config() are ignored -- confirm that is intended.
+ */
+int vigs_fbdev_create(struct vigs_device *vigs_dev,
+ struct vigs_fbdev **vigs_fbdev)
+{
+ int ret = 0;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ *vigs_fbdev = kzalloc(sizeof(**vigs_fbdev), GFP_KERNEL);
+
+ if (!*vigs_fbdev) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ (*vigs_fbdev)->base.funcs = &vigs_fbdev_funcs;
+
+ ret = drm_fb_helper_init(vigs_dev->drm_dev,
+ &(*vigs_fbdev)->base,
+ 1, 1);
+
+ if (ret != 0) {
+ DRM_ERROR("unable to init fb_helper: %d\n", ret);
+ goto fail2;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&(*vigs_fbdev)->base);
+ drm_fb_helper_initial_config(&(*vigs_fbdev)->base, 32);
+
+ return 0;
+
+fail2:
+ kfree(*vigs_fbdev);
+fail1:
+ *vigs_fbdev = NULL;
+
+ return ret;
+}
+
+/* Tear down fbdev emulation: the registered fb_info, then the helper. */
+void vigs_fbdev_destroy(struct vigs_fbdev *vigs_fbdev)
+{
+ struct fb_info *info = vigs_fbdev->base.fbdev;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ if (info != NULL) {
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ drm_fb_helper_fini(&vigs_fbdev->base);
+ kfree(vigs_fbdev);
+}
+
+/* Forward a hotplug notification to the fb helper. */
+void vigs_fbdev_output_poll_changed(struct vigs_fbdev *vigs_fbdev)
+{
+ struct drm_fb_helper *helper = &vigs_fbdev->base;
+
+ DRM_DEBUG_KMS("enter\n");
+ drm_fb_helper_hotplug_event(helper);
+}
+
+/* Restore the fbdev mode (e.g. on lastclose). */
+void vigs_fbdev_restore_mode(struct vigs_fbdev *vigs_fbdev)
+{
+ struct drm_fb_helper *helper = &vigs_fbdev->base;
+
+ DRM_DEBUG_KMS("enter\n");
+ drm_fb_helper_restore_fbdev_mode(helper);
+}
--- /dev/null
+#ifndef _VIGS_FBDEV_H_
+#define _VIGS_FBDEV_H_
+
+#include "drmP.h"
+#include "drm_fb_helper.h"
+
+struct vigs_device;
+
+/* fbdev emulation state: just the DRM fb helper for now. */
+struct vigs_fbdev
+{
+ struct drm_fb_helper base;
+};
+
+static inline struct vigs_fbdev *fbdev_to_vigs_fbdev(struct drm_fb_helper *fbdev)
+{
+ return container_of(fbdev, struct vigs_fbdev, base);
+}
+
+/* Creates fbdev emulation; on failure *vigs_fbdev is set to NULL. */
+int vigs_fbdev_create(struct vigs_device *vigs_dev,
+ struct vigs_fbdev **vigs_fbdev);
+
+void vigs_fbdev_destroy(struct vigs_fbdev *vigs_fbdev);
+
+/* Called when output configuration may have changed (hotplug). */
+void vigs_fbdev_output_poll_changed(struct vigs_fbdev *vigs_fbdev);
+
+/* Restores the fbdev mode, e.g. when the last DRM client exits. */
+void vigs_fbdev_restore_mode(struct vigs_fbdev *vigs_fbdev);
+
+#endif
--- /dev/null
+#include "vigs_framebuffer.h"
+#include "vigs_device.h"
+#include "vigs_gem.h"
+#include "vigs_fbdev.h"
+#include "vigs_comm.h"
+#include "drm_crtc_helper.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * mode_config .fb_create hook: wrap a user-provided GEM handle in a
+ * vigs framebuffer.
+ *
+ * NOTE(review): errors are reported by returning NULL; many DRM cores of
+ * this era expect ERR_PTR() from fb_create -- verify against the kernel
+ * version this patch targets.
+ */
+static struct drm_framebuffer *vigs_fb_create(struct drm_device *drm_dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_gem_object *gem;
+ struct vigs_framebuffer *vigs_fb;
+ int ret;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, mode_cmd->handles[0]);
+
+ if (!gem) {
+ DRM_ERROR("GEM lookup failed, handle = %u\n", mode_cmd->handles[0]);
+ return NULL;
+ }
+
+ ret = vigs_framebuffer_create(vigs_dev,
+ mode_cmd,
+ gem_to_vigs_gem(gem),
+ &vigs_fb);
+
+ /* The framebuffer holds its own reference on success. */
+ drm_gem_object_unreference_unlocked(gem);
+
+ if (ret != 0) {
+ DRM_ERROR("unable to create the framebuffer: %d\n", ret);
+ return NULL;
+ }
+
+ return &vigs_fb->base;
+}
+
+/* mode_config hook: relay output changes to fbdev, when it exists. */
+static void vigs_output_poll_changed(struct drm_device *drm_dev)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ if (!vigs_dev->fbdev) {
+ return;
+ }
+
+ vigs_fbdev_output_poll_changed(vigs_dev->fbdev);
+}
+
+/* Framebuffer .destroy: cleanup order matters, see inline comments. */
+static void vigs_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct vigs_framebuffer *vigs_fb = fb_to_vigs_fb(fb);
+
+ DRM_DEBUG_KMS("enter\n");
+
+ /*
+ * First, we need to call 'drm_framebuffer_cleanup', this'll
+ * automatically call 'vigs_crtc_disable' if needed, thus, notifying
+ * the host that root surface is gone.
+ */
+
+ drm_framebuffer_cleanup(fb);
+
+ /*
+ * Here we can issue surface destroy command, since it's no longer
+ * root surface, but it still exists on host.
+ */
+
+ vigs_comm_destroy_surface(vigs_fb->comm, vigs_fb->sfc_id);
+
+ /*
+ * And we can finally free the GEM.
+ */
+
+ drm_gem_object_unreference_unlocked(&vigs_fb->fb_gem->base);
+ kfree(vigs_fb);
+}
+
+/* Framebuffer .dirty: intentionally a no-op, clip updates aren't needed. */
+static int vigs_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ return 0;
+}
+
+/* Framebuffer .create_handle: expose the backing GEM to user space. */
+static int vigs_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct vigs_framebuffer *vigs_fb = fb_to_vigs_fb(fb);
+
+ DRM_DEBUG_KMS("enter\n");
+
+ return drm_gem_handle_create(file_priv, &vigs_fb->fb_gem->base, handle);
+}
+
+/* mode_config callbacks. */
+static struct drm_mode_config_funcs vigs_mode_config_funcs =
+{
+ .fb_create = vigs_fb_create,
+ .output_poll_changed = vigs_output_poll_changed
+};
+
+/* Per-framebuffer callbacks. */
+static struct drm_framebuffer_funcs vigs_framebuffer_funcs =
+{
+ .destroy = vigs_framebuffer_destroy,
+ .create_handle = vigs_framebuffer_create_handle,
+ .dirty = vigs_framebuffer_dirty,
+};
+
+/* Set the mode-config limits (0..4096 in both axes) and callbacks. */
+void vigs_framebuffer_config_init(struct vigs_device *vigs_dev)
+{
+ struct drm_mode_config *config = &vigs_dev->drm_dev->mode_config;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ config->min_width = 0;
+ config->min_height = 0;
+ config->max_width = 4096;
+ config->max_height = 4096;
+ config->funcs = &vigs_mode_config_funcs;
+}
+
+/*
+ * Create a vigs framebuffer over 'fb_gem': pick the host surface format
+ * from the DRM pixel format, create the host surface, then register the
+ * DRM framebuffer.  Takes a reference on fb_gem on success; on failure
+ * *vigs_fb is NULL and nothing is leaked.
+ */
+int vigs_framebuffer_create(struct vigs_device *vigs_dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct vigs_gem_object *fb_gem,
+ struct vigs_framebuffer **vigs_fb)
+{
+ int ret = 0;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ *vigs_fb = kzalloc(sizeof(**vigs_fb), GFP_KERNEL);
+
+ if (!*vigs_fb) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Only 32bpp XRGB/ARGB are understood by the host protocol. */
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ (*vigs_fb)->format = vigsp_surface_bgrx8888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ (*vigs_fb)->format = vigsp_surface_bgra8888;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format: %u\n", mode_cmd->pixel_format);
+ ret = -EINVAL;
+ goto fail2;
+ }
+
+ ret = vigs_comm_create_surface(vigs_dev->comm,
+ mode_cmd->width,
+ mode_cmd->height,
+ mode_cmd->pitches[0],
+ (*vigs_fb)->format,
+ fb_gem,
+ &(*vigs_fb)->sfc_id);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ (*vigs_fb)->comm = vigs_dev->comm;
+ (*vigs_fb)->fb_gem = fb_gem;
+
+ ret = drm_framebuffer_init(vigs_dev->drm_dev,
+ &(*vigs_fb)->base,
+ &vigs_framebuffer_funcs);
+
+ if (ret != 0) {
+ goto fail3;
+ }
+
+ drm_helper_mode_fill_fb_struct(&(*vigs_fb)->base, mode_cmd);
+
+ /* Keep the GEM alive for the framebuffer's lifetime. */
+ drm_gem_object_reference(&fb_gem->base);
+
+ return 0;
+
+fail3:
+ vigs_comm_destroy_surface(vigs_dev->comm, (*vigs_fb)->sfc_id);
+fail2:
+ kfree(*vigs_fb);
+fail1:
+ *vigs_fb = NULL;
+
+ return ret;
+}
+
+/* ioctl: report the host surface id backing a DRM framebuffer id. */
+int vigs_framebuffer_info_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_fb_info *args = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
+
+ mutex_lock(&drm_dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(drm_dev, args->fb_id, DRM_MODE_OBJECT_FB);
+
+ if (obj) {
+ args->sfc_id = fb_to_vigs_fb(obj_to_fb(obj))->sfc_id;
+ } else {
+ ret = -ENOENT;
+ }
+
+ mutex_unlock(&drm_dev->mode_config.mutex);
+
+ return ret;
+}
--- /dev/null
+#ifndef _VIGS_FRAMEBUFFER_H_
+#define _VIGS_FRAMEBUFFER_H_
+
+#include "drmP.h"
+#include "vigs_protocol.h"
+
+struct vigs_device;
+struct vigs_comm;
+struct vigs_gem_object;
+
+/* DRM framebuffer plus its host-side surface and backing GEM. */
+struct vigs_framebuffer
+{
+ struct drm_framebuffer base;
+
+ /*
+ * Cached from 'vigs_device' for speed.
+ */
+ struct vigs_comm *comm;
+
+ /* Host surface pixel format derived from the DRM pixel format. */
+ vigsp_surface_format format;
+
+ /* Backing storage; referenced for the framebuffer's lifetime. */
+ struct vigs_gem_object *fb_gem;
+
+ /*
+ * Each DRM framebuffer has a surface on host, this is
+ * its id.
+ */
+ vigsp_surface_id sfc_id;
+};
+
+static inline struct vigs_framebuffer *fb_to_vigs_fb(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct vigs_framebuffer, base);
+}
+
+/* Installs mode-config limits and callbacks on the DRM device. */
+void vigs_framebuffer_config_init(struct vigs_device *vigs_dev);
+
+/*
+ * Creates a framebuffer object.
+ * Note that it also gets a reference to 'fb_gem' (in case of success), so
+ * don't forget to unreference it in the calling code.
+ */
+int vigs_framebuffer_create(struct vigs_device *vigs_dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct vigs_gem_object *fb_gem,
+ struct vigs_framebuffer **vigs_fb);
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+/* Returns the host surface id for a framebuffer id. */
+int vigs_framebuffer_info_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_gem.h"
+#include "vigs_buffer.h"
+#include "vigs_device.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * Allocate a GEM object backed by a TTM buffer in 'domain'.
+ * 'size' is rounded up to a page; zero size is rejected.
+ * On failure *vigs_gem is NULL and nothing is leaked.
+ */
+int vigs_gem_create(struct vigs_device *vigs_dev,
+ unsigned long size,
+ bool kernel,
+ u32 domain,
+ struct vigs_gem_object **vigs_gem)
+{
+ int ret = 0;
+
+ size = roundup(size, PAGE_SIZE);
+
+ if (size == 0) {
+ return -EINVAL;
+ }
+
+ *vigs_gem = kzalloc(sizeof(**vigs_gem), GFP_KERNEL);
+
+ if (!*vigs_gem) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ ret = vigs_buffer_create(vigs_dev->mman,
+ size,
+ kernel,
+ domain,
+ &(*vigs_gem)->bo);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ret = drm_gem_object_init(vigs_dev->drm_dev, &(*vigs_gem)->base, size);
+
+ if (ret != 0) {
+ goto fail3;
+ }
+
+ return 0;
+
+fail3:
+ vigs_buffer_release((*vigs_gem)->bo);
+fail2:
+ kfree(*vigs_gem);
+fail1:
+ *vigs_gem = NULL;
+
+ return ret;
+}
+
+/* GEM free callback: release the TTM buffer and the wrapper itself. */
+void vigs_gem_free_object(struct drm_gem_object *gem)
+{
+ struct vigs_gem_object *vigs_gem = gem_to_vigs_gem(gem);
+
+ vigs_buffer_release(vigs_gem->bo);
+
+ kfree(vigs_gem);
+}
+
+/* GEM init callback: no per-object setup is required. */
+int vigs_gem_init_object(struct drm_gem_object *gem)
+{
+ return 0;
+}
+
+/* GEM open callback: no per-file state to set up. */
+int vigs_gem_open_object(struct drm_gem_object *gem,
+ struct drm_file *file_priv)
+{
+ return 0;
+}
+
+/* GEM close callback: no per-file state to tear down. */
+void vigs_gem_close_object(struct drm_gem_object *gem,
+ struct drm_file *file_priv)
+{
+}
+
+/*
+ * Dumb-buffer create: allocate a VRAM GEM sized for width x height x bpp
+ * and return a handle.  Pitch is bytes per line; size is page-aligned.
+ *
+ * Bug fix: the original ignored drm_gem_handle_create() failures and
+ * returned 0 (and logged "created") even when no handle existed.
+ */
+int vigs_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct vigs_gem_object *vigs_gem = NULL;
+ uint32_t handle;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+
+ ret = vigs_gem_create(vigs_dev,
+ args->size,
+ false,
+ DRM_VIGS_GEM_DOMAIN_VRAM,
+ &vigs_gem);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file_priv,
+ &vigs_gem->base,
+ &handle);
+
+ /* Drop our local reference; the handle (if any) keeps the GEM alive. */
+ drm_gem_object_unreference_unlocked(&vigs_gem->base);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ args->handle = handle;
+
+ DRM_DEBUG_DRIVER("GEM %u created\n", handle);
+
+ return 0;
+}
+
+/* Dumb-buffer destroy: drop the handle; the GEM dies with its last ref. */
+int vigs_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ uint32_t handle)
+{
+ DRM_DEBUG_DRIVER("destroying GEM %u\n", handle);
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+/*
+ * Dumb-buffer map: resolve a handle to the fake mmap offset user space
+ * passes to mmap() on the DRM fd.
+ */
+int vigs_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ uint32_t handle, uint64_t *offset_p)
+{
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+
+ BUG_ON(!offset_p);
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ *offset_p = vigs_buffer_mmap_offset(vigs_gem->bo);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return 0;
+}
+
+/*
+ * ioctl: create a GEM in the user-requested domain and return its handle,
+ * actual (rounded) size and offset within the domain.
+ */
+int vigs_gem_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_gem_create *args = data;
+ struct vigs_gem_object *vigs_gem = NULL;
+ uint32_t handle;
+ int ret;
+
+ ret = vigs_gem_create(vigs_dev,
+ args->size,
+ false,
+ args->domain,
+ &vigs_gem);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file_priv,
+ &vigs_gem->base,
+ &handle);
+
+ /* Drop our local reference; the handle (if any) keeps the GEM alive. */
+ drm_gem_object_unreference_unlocked(&vigs_gem->base);
+
+ if (ret == 0) {
+ args->size = vigs_buffer_size(vigs_gem->bo);
+ args->handle = handle;
+ args->domain_offset = vigs_buffer_offset(vigs_gem->bo);
+ DRM_DEBUG_DRIVER("GEM %u created\n", handle);
+ }
+
+ return ret;
+}
+
+/* ioctl: resolve a GEM handle to its mmap offset. */
+int vigs_gem_mmap_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_gem_mmap *args = data;
+ struct drm_gem_object *gem =
+ drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (!gem) {
+ return -ENOENT;
+ }
+
+ args->offset = vigs_buffer_mmap_offset(gem_to_vigs_gem(gem)->bo);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return 0;
+}
+
+/* ioctl: report a GEM's placement domain and offset within it. */
+int vigs_gem_info_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_gem_info *args = data;
+ struct vigs_buffer_object *bo;
+ struct drm_gem_object *gem =
+ drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (!gem) {
+ return -ENOENT;
+ }
+
+ bo = gem_to_vigs_gem(gem)->bo;
+ args->domain = bo->domain;
+ args->domain_offset = vigs_buffer_offset(bo);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return 0;
+}
--- /dev/null
+#ifndef _VIGS_GEM_H_
+#define _VIGS_GEM_H_
+
+#include "drmP.h"
+
+struct vigs_device;
+struct vigs_buffer_object;
+
+/* GEM object: the DRM object plus its backing TTM buffer. */
+struct vigs_gem_object
+{
+ struct drm_gem_object base;
+
+ struct vigs_buffer_object *bo;
+};
+
+static inline struct vigs_gem_object *gem_to_vigs_gem(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct vigs_gem_object, base);
+}
+
+/*
+ * Creates a gem object. 'size' is automatically rounded up to page size.
+ */
+int vigs_gem_create(struct vigs_device *vigs_dev,
+ unsigned long size,
+ bool kernel,
+ u32 domain,
+ struct vigs_gem_object **vigs_gem);
+
+/* drm_driver .gem_free_object callback. */
+void vigs_gem_free_object(struct drm_gem_object *gem);
+
+/* drm_driver .gem_init_object callback (no-op). */
+int vigs_gem_init_object(struct drm_gem_object *gem);
+
+/* drm_driver .gem_open_object callback (no-op). */
+int vigs_gem_open_object(struct drm_gem_object *gem,
+ struct drm_file *file_priv);
+
+/* drm_driver .gem_close_object callback (no-op). */
+void vigs_gem_close_object(struct drm_gem_object *gem,
+ struct drm_file *file_priv);
+
+/*
+ * Dumb
+ * @{
+ */
+
+int vigs_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ struct drm_mode_create_dumb *args);
+
+int vigs_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ uint32_t handle);
+
+int vigs_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ uint32_t handle, uint64_t *offset_p);
+
+/*
+ * @}
+ */
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_gem_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_gem_mmap_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_gem_info_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_mman.h"
+#include <ttm/ttm_placement.h>
+
+/*
+ * This is TTM-based memory manager for VIGS, it supports 3 memory placements:
+ * CPU - This is for target-only memory, not shared with host.
+ * VRAM - This gets allocated on "VRAM" PCI BAR, shared with host, typically
+ * used for surface placement.
+ * RAM - This gets allocated on "RAM" PCI BAR, shared with host, typically
+ * used for protocol commands placement.
+ *
+ * No eviction supported yet, so buffers cannot be moved between placements.
+ */
+
+/*
+ * Offsets for mmap will start at DRM_FILE_OFFSET
+ */
+#define DRM_FILE_OFFSET 0x100000000ULL
+#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
+
+/*
+ * DRM_GLOBAL_TTM_MEM init/release thunks
+ * @{
+ */
+
+/* drm_global_reference .init thunk for DRM_GLOBAL_TTM_MEM. */
+static int vigs_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+/* drm_global_reference .release thunk for DRM_GLOBAL_TTM_MEM. */
+static void vigs_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+/*
+ * @}
+ */
+
+/*
+ * Here we initialize mman::bo_global_ref and mman::mem_global_ref.
+ * This is required in order to bring up TTM bo subsystem and TTM memory
+ * subsystem if they aren't already up. The first one who
+ * calls 'drm_global_item_ref' automatically initializes the specified
+ * subsystem and the last one who calls 'drm_global_item_unref' automatically
+ * brings down the specified subsystem.
+ * @{
+ */
+
+/*
+ * Take references on the TTM memory and bo global subsystems (memory
+ * first, since the bo global depends on it).  Unwinds on failure.
+ */
+static int vigs_mman_global_init(struct vigs_mman *mman)
+{
+ struct drm_global_reference *global_ref = NULL;
+ int ret = 0;
+
+ global_ref = &mman->mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &vigs_ttm_mem_global_init;
+ global_ref->release = &vigs_ttm_mem_global_release;
+
+ ret = drm_global_item_ref(global_ref);
+
+ if (ret != 0) {
+ DRM_ERROR("failed setting up TTM memory subsystem: %d\n", ret);
+ return ret;
+ }
+
+ mman->bo_global_ref.mem_glob = mman->mem_global_ref.object;
+ global_ref = &mman->bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+
+ ret = drm_global_item_ref(global_ref);
+
+ if (ret != 0) {
+ DRM_ERROR("failed setting up TTM bo subsystem: %d\n", ret);
+ drm_global_item_unref(&mman->mem_global_ref);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Release global refs in reverse order of vigs_mman_global_init(). */
+static void vigs_mman_global_cleanup(struct vigs_mman *mman)
+{
+ drm_global_item_unref(&mman->bo_global_ref.ref);
+ drm_global_item_unref(&mman->mem_global_ref);
+}
+
+/*
+ * @}
+ */
+
+/*
+ * TTM backend functions.
+ * @{
+ */
+
+/*
+ * TTM backend bind is unsupported (no eviction -> buffers never move).
+ * Bug fix: the bare 'return -1' was an accidental -EPERM and the log
+ * line lacked a newline; report a descriptive errno instead.
+ */
+static int vigs_ttm_backend_bind(struct ttm_tt *tt,
+ struct ttm_mem_reg *bo_mem)
+{
+ DRM_ERROR("not implemented\n");
+
+ return -ENOSYS;
+}
+
+/*
+ * TTM backend unbind is unsupported (see vigs_ttm_backend_bind).
+ * Bug fix: descriptive errno and newline-terminated log message.
+ */
+static int vigs_ttm_backend_unbind(struct ttm_tt *tt)
+{
+ DRM_ERROR("not implemented\n");
+
+ return -ENOSYS;
+}
+
+/* Free the ttm_dma_tt allocated in vigs_ttm_tt_create(). */
+static void vigs_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ struct ttm_dma_tt *dma_tt = (void*)tt;
+
+ ttm_dma_tt_fini(dma_tt);
+ kfree(dma_tt);
+}
+
+/* Backend ops: bind/unbind are stubs, destroy frees the tt. */
+static struct ttm_backend_func vigs_ttm_backend_func = {
+ .bind = &vigs_ttm_backend_bind,
+ .unbind = &vigs_ttm_backend_unbind,
+ .destroy = &vigs_ttm_backend_destroy,
+};
+
+/*
+ * Allocate a DMA-capable ttm_tt wired to our backend ops; returns NULL
+ * on allocation/init failure.
+ * NOTE(review): not 'static' unlike the other callbacks in this file --
+ * presumably an oversight; confirm nothing external references it.
+ */
+struct ttm_tt *vigs_ttm_tt_create(struct ttm_bo_device *bo_dev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_dma_tt *dma_tt;
+ int ret;
+
+ dma_tt = kzalloc(sizeof(struct ttm_dma_tt), GFP_KERNEL);
+
+ if (dma_tt == NULL) {
+ DRM_ERROR("cannot allocate ttm_dma_tt: OOM\n");
+ return NULL;
+ }
+
+ dma_tt->ttm.func = &vigs_ttm_backend_func;
+
+ ret = ttm_dma_tt_init(dma_tt, bo_dev, size, page_flags,
+ dummy_read_page);
+
+ if (ret != 0) {
+ DRM_ERROR("ttm_dma_tt_init failed: %d\n", ret);
+ kfree(dma_tt);
+ return NULL;
+ }
+
+ return &dma_tt->ttm;
+}
+
+/*
+ * @}
+ */
+
+/* No cache maintenance is performed for any placement in this driver. */
+static int vigs_ttm_invalidate_caches(struct ttm_bo_device *bo_dev,
+ uint32_t flags)
+{
+ return 0;
+}
+
+/*
+ * Describe the three placements to TTM: SYSTEM (plain pages) plus the
+ * two fixed, mappable PCI apertures -- VRAM and RAM (TTM_PL_PRIV0),
+ * which share the same configuration.
+ */
+static int vigs_ttm_init_mem_type(struct ttm_bo_device *bo_dev,
+ uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ case TTM_PL_PRIV0:
+ /*
+ * For now we don't handle OOMs, i.e. if user mode
+ * will allocate too many pixmaps then kernel will complain and
+ * everything will break. Later we'll implement our own
+ * ttm_mem_type_manager_func and handle OOMs.
+ */
+ man->func = &ttm_bo_manager_func;
+ man->gpu_offset = 0;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ default:
+ DRM_ERROR("unsupported memory type: %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* No per-file access control: every client may map every buffer. */
+static int vigs_ttm_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ return 0;
+}
+
+/*
+ * Translate a TTM placement into bus-address info for mapping:
+ * SYSTEM needs nothing, VRAM/RAM map into their respective PCI BARs.
+ */
+static int vigs_ttm_io_mem_reserve(struct ttm_bo_device *bo_dev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bo_dev->man[mem->mem_type];
+ struct vigs_mman *mman = bo_dev_to_vigs_mman(bo_dev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) {
+ return -EINVAL;
+ }
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ return 0;
+ case TTM_PL_VRAM:
+ DRM_DEBUG_DRIVER("VRAM reservation\n");
+ mem->bus.is_iomem = true;
+ mem->bus.base = mman->vram_base;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ break;
+ case TTM_PL_PRIV0:
+ DRM_DEBUG_DRIVER("RAM reservation\n");
+ mem->bus.is_iomem = true;
+ mem->bus.base = mman->ram_base;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Counterpart of vigs_ttm_io_mem_reserve(); nothing was reserved. */
+static void vigs_ttm_io_mem_free(struct ttm_bo_device *bo_dev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+/* TTM bo driver hooks for this device. */
+static struct ttm_bo_driver vigs_ttm_bo_driver =
+{
+ .ttm_tt_create = &vigs_ttm_tt_create, /* Only needed for ttm_bo_type_kernel */
+ .invalidate_caches = &vigs_ttm_invalidate_caches,
+ .init_mem_type = &vigs_ttm_init_mem_type,
+ /*
+ * We don't support eviction right now, this will be supported
+ * later, so for now all buffers are always pinned.
+ */
+ .evict_flags = NULL,
+ .verify_access = &vigs_ttm_verify_access,
+ .io_mem_reserve = &vigs_ttm_io_mem_reserve,
+ .io_mem_free = &vigs_ttm_io_mem_free,
+};
+
+/*
+ * ttm_bo_mmap installs TTM's vm_ops; we substitute a copy whose fault
+ * handler bails out with VM_FAULT_NOPAGE when vm_private_data no longer
+ * points at a buffer object.  'ttm_vm_ops' caches TTM's original ops,
+ * captured lazily on the first mmap (see vigs_mman_mmap).
+ */
+static struct vm_operations_struct vigs_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
+static int vigs_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo;
+
+ bo = (struct ttm_buffer_object*)vma->vm_private_data;
+
+ if (bo == NULL) {
+ return VM_FAULT_NOPAGE;
+ }
+
+ return ttm_vm_ops->fault(vma, vmf);
+}
+
+/*
+ * Bring up the memory manager: TTM globals, the bo device, then one
+ * fixed-range manager per PCI aperture (VRAM and RAM/TTM_PL_PRIV0).
+ * On failure everything is unwound and *mman is NULL.
+ */
+int vigs_mman_create(resource_size_t vram_base,
+ resource_size_t vram_size,
+ resource_size_t ram_base,
+ resource_size_t ram_size,
+ struct vigs_mman **mman)
+{
+ int ret = 0;
+ unsigned long num_pages = 0;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ *mman = kzalloc(sizeof(**mman), GFP_KERNEL);
+
+ if (!*mman) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ ret = vigs_mman_global_init(*mman);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ (*mman)->vram_base = vram_base;
+ (*mman)->ram_base = ram_base;
+
+ ret = ttm_bo_device_init(&(*mman)->bo_dev,
+ (*mman)->bo_global_ref.ref.object,
+ &vigs_ttm_bo_driver,
+ DRM_FILE_PAGE_OFFSET,
+ 0);
+ if (ret != 0) {
+ DRM_ERROR("failed initializing bo driver: %d\n", ret);
+ goto fail3;
+ }
+
+ /*
+ * Init VRAM
+ * @{
+ */
+
+ num_pages = vram_size / PAGE_SIZE;
+
+ ret = ttm_bo_init_mm(&(*mman)->bo_dev,
+ TTM_PL_VRAM,
+ num_pages);
+ if (ret != 0) {
+ DRM_ERROR("failed initializing VRAM mm\n");
+ goto fail4;
+ }
+
+ /*
+ * @}
+ */
+
+ /*
+ * Init RAM
+ * @{
+ */
+
+ num_pages = ram_size / PAGE_SIZE;
+
+ ret = ttm_bo_init_mm(&(*mman)->bo_dev,
+ TTM_PL_PRIV0,
+ num_pages);
+ if (ret != 0) {
+ DRM_ERROR("failed initializing RAM mm\n");
+ goto fail5;
+ }
+
+ /*
+ * @}
+ */
+
+ return 0;
+
+fail5:
+ ttm_bo_clean_mm(&(*mman)->bo_dev, TTM_PL_VRAM);
+fail4:
+ ttm_bo_device_release(&(*mman)->bo_dev);
+fail3:
+ vigs_mman_global_cleanup(*mman);
+fail2:
+ kfree(*mman);
+fail1:
+ *mman = NULL;
+
+ return ret;
+}
+
+/* Tear down in strict reverse order of vigs_mman_create(). */
+void vigs_mman_destroy(struct vigs_mman *mman)
+{
+ DRM_DEBUG_DRIVER("enter\n");
+
+ ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_PRIV0);
+ ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_VRAM);
+ ttm_bo_device_release(&mman->bo_dev);
+ vigs_mman_global_cleanup(mman);
+
+ kfree(mman);
+}
+
+/*
+ * mmap dispatcher: offsets below DRM_FILE_PAGE_OFFSET belong to legacy
+ * DRM maps, everything above is a TTM buffer.  After ttm_bo_mmap() we
+ * swap in our own vm_ops copy so faults go through vigs_ttm_fault().
+ *
+ * NOTE(review): the lazy capture of 'ttm_vm_ops' has no locking --
+ * concurrent first mmaps could race; confirm whether a caller-side lock
+ * serializes this path.
+ */
+int vigs_mman_mmap(struct vigs_mman *mman,
+ struct file *filp,
+ struct vm_area_struct *vma)
+{
+ int ret;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+ return drm_mmap(filp, vma);
+ }
+
+ ret = ttm_bo_mmap(filp, vma, &mman->bo_dev);
+
+ if (unlikely(ret != 0)) {
+ return ret;
+ }
+
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+ vigs_ttm_vm_ops = *ttm_vm_ops;
+ vigs_ttm_vm_ops.fault = &vigs_ttm_fault;
+ }
+
+ vma->vm_ops = &vigs_ttm_vm_ops;
+
+ return 0;
+}
--- /dev/null
+#ifndef _VIGS_MMAN_H_
+#define _VIGS_MMAN_H_
+
+#include "drmP.h"
+#include <ttm/ttm_bo_driver.h>
+
+/* TTM-based memory manager state; see vigs_mman.c for placement docs. */
+struct vigs_mman
+{
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bo_dev;
+
+ /* Bus addresses of the VRAM and RAM PCI BARs. */
+ resource_size_t vram_base;
+ resource_size_t ram_base;
+};
+
+static inline struct vigs_mman *bo_dev_to_vigs_mman(struct ttm_bo_device *bo_dev)
+{
+ return container_of(bo_dev, struct vigs_mman, bo_dev);
+}
+
+/* Creates the manager; on failure *mman is set to NULL. */
+int vigs_mman_create(resource_size_t vram_base,
+ resource_size_t vram_size,
+ resource_size_t ram_base,
+ resource_size_t ram_size,
+ struct vigs_mman **mman);
+
+void vigs_mman_destroy(struct vigs_mman *mman);
+
+/* File-operations mmap handler for both legacy DRM maps and TTM bos. */
+int vigs_mman_mmap(struct vigs_mman *mman,
+ struct file *filp,
+ struct vm_area_struct *vma);
+
+#endif
--- /dev/null
+#include "vigs_output.h"
+#include "vigs_device.h"
+#include "drm_crtc_helper.h"
+#include <linux/init.h>
+
+#define DPI_DEF_VALUE 3160
+#define DPI_MIN_VALUE 1000
+#define DPI_MAX_VALUE 4800
+
+#ifndef MODULE
+/*
+ * Minimal base-10 parser for built-in boot arguments: accumulates the
+ * leading run of decimal digits and stops at the first non-digit
+ * (no sign or whitespace handling).
+ */
+static int vigs_atoi(const char *str)
+{
+ int result = 0;
+
+ while ((*str >= '0') && (*str <= '9')) {
+ result = (result * 10) + (*str - '0');
+ ++str;
+ }
+
+ return result;
+}
+#endif
+
+/* One virtual output: a connector/encoder pair allocated together. */
+struct vigs_output
+{
+ /*
+ * 'connector' is the owner of the 'vigs_output', i.e.
+ * when 'connector' is destroyed whole structure is destroyed.
+ */
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+};
+
+/* Recover the owning vigs_output from its embedded connector. */
+static inline struct vigs_output *connector_to_vigs_output(struct drm_connector *connector)
+{
+ return container_of(connector, struct vigs_output, connector);
+}
+
+/* Recover the owning vigs_output from its embedded encoder. */
+static inline struct vigs_output *encoder_to_vigs_output(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vigs_output, encoder);
+}
+
+/* Connector .save: nothing to save for a virtual output. */
+static void vigs_connector_save(struct drm_connector *connector)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+/* Connector .restore: nothing to restore for a virtual output. */
+static void vigs_connector_restore(struct drm_connector *connector)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+/* The virtual output is always present, so always report connected. */
+static enum drm_connector_status vigs_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ DRM_DEBUG_KMS("enter: force = %d\n", force);
+
+ return connector_status_connected;
+}
+
+/* Properties are accepted but have no effect on the virtual output. */
+static int vigs_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ DRM_DEBUG_KMS("enter: %s = %llu\n", property->name, value);
+
+ return 0;
+}
+
+/*
+ * Connector teardown.  The connector owns the whole vigs_output, so the
+ * embedding structure is freed here.
+ */
+static void vigs_connector_destroy(struct drm_connector *connector)
+{
+ struct vigs_output *vigs_output = connector_to_vigs_output(connector);
+
+ DRM_DEBUG_KMS("enter\n");
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+
+ kfree(vigs_output);
+}
+
+/*
+ * connector->get_modes hook: advertises the fixed set of modes supported
+ * by the virtual display (480x800 and 720x1280 at 60 Hz, CVT timings).
+ *
+ * Returns the number of modes added, per the DRM get_modes contract.
+ * (The previous code returned 'i - 1', under-reporting by one; it also
+ * passed a possibly-NULL drm_cvt_mode() result to drm_mode_probed_add().)
+ */
+static int vigs_connector_get_modes(struct drm_connector *connector)
+{
+    struct vigs_output *vigs_output = connector_to_vigs_output(connector);
+    struct drm_device *drm_dev = vigs_output->connector.dev;
+    int i;
+    int count = 0;
+    struct
+    {
+        int w;
+        int h;
+    } modes[] =
+    {
+        { 480, 800 },
+        { 720, 1280 },
+    };
+
+    DRM_DEBUG_KMS("enter\n");
+
+    for (i = 0; i < ARRAY_SIZE(modes); i++) {
+        struct drm_display_mode *mode =
+            drm_cvt_mode(drm_dev,
+                         modes[i].w,
+                         modes[i].h,
+                         60, false, false, false);
+
+        /* drm_cvt_mode() allocates and may fail; skip instead of adding NULL. */
+        if (!mode) {
+            continue;
+        }
+
+        drm_mode_probed_add(connector, mode);
+        count++;
+    }
+
+    return count;
+}
+
+/*
+ * connector->mode_valid hook: every probed mode is accepted as-is.
+ */
+static int vigs_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ return MODE_OK;
+}
+
+/*
+ * connector->best_encoder hook: there is exactly one encoder per output,
+ * so always return the embedded one.
+ *
+ * Made 'static' for consistency with every other hook in this file; it
+ * is only referenced through vigs_connector_helper_funcs below.
+ */
+static struct drm_encoder *vigs_connector_best_encoder(struct drm_connector *connector)
+{
+    struct vigs_output *vigs_output = connector_to_vigs_output(connector);
+
+    DRM_DEBUG_KMS("enter\n");
+
+    return &vigs_output->encoder;
+}
+
+/*
+ * encoder->destroy hook: only DRM bookkeeping to undo here; the memory
+ * is owned (and freed) by the connector, see vigs_connector_destroy().
+ */
+static void vigs_encoder_destroy(struct drm_encoder *encoder)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ drm_encoder_cleanup(encoder);
+}
+
+/* encoder->dpms hook: power state changes are a no-op for a virtual encoder (stub). */
+static void vigs_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ DRM_DEBUG_KMS("enter: mode = %d\n", mode);
+}
+
+/*
+ * encoder->mode_fixup hook: any mode is usable unmodified, so report
+ * success without touching 'adjusted_mode'.
+ */
+static bool vigs_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ return true;
+}
+
+/* encoder->prepare hook: nothing to do before a modeset (stub). */
+static void vigs_encoder_prepare(struct drm_encoder *encoder)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+/* encoder->mode_set hook: the virtual encoder needs no hardware programming (stub). */
+static void vigs_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+/* encoder->commit hook: nothing to commit after a modeset (stub). */
+static void vigs_encoder_commit(struct drm_encoder *encoder)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+/* Connector vtable; dpms is delegated to the generic DRM helper. */
+static const struct drm_connector_funcs vigs_connector_funcs =
+{
+ .dpms = drm_helper_connector_dpms,
+ .save = vigs_connector_save,
+ .restore = vigs_connector_restore,
+ .detect = vigs_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = vigs_connector_set_property,
+ .destroy = vigs_connector_destroy,
+};
+
+/* Probing helpers used by drm_helper_probe_single_connector_modes(). */
+static const struct drm_connector_helper_funcs vigs_connector_helper_funcs =
+{
+ .get_modes = vigs_connector_get_modes,
+ .mode_valid = vigs_connector_mode_valid,
+ .best_encoder = vigs_connector_best_encoder,
+};
+
+/* Encoder vtable: destruction only; modeset work goes through the helpers below. */
+static const struct drm_encoder_funcs vigs_encoder_funcs =
+{
+ .destroy = vigs_encoder_destroy,
+};
+
+/* Modeset helper hooks; all are no-op stubs for the virtual encoder. */
+static const struct drm_encoder_helper_funcs vigs_encoder_helper_funcs =
+{
+ .dpms = vigs_encoder_dpms,
+ .mode_fixup = vigs_encoder_mode_fixup,
+ .prepare = vigs_encoder_prepare,
+ .mode_set = vigs_encoder_mode_set,
+ .commit = vigs_encoder_commit,
+};
+
+/*
+ * Creates the single virtual connector+encoder pair for 'vigs_dev',
+ * wires up the helper vtables and registers the connector with sysfs.
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ *
+ * NOTE(review): every error path after drm_connector_init() succeeds
+ * returns without explicit cleanup — this appears to rely on the mode
+ * config teardown invoking the destroy hooks (which free 'vigs_output');
+ * confirm the caller actually triggers drm_mode_config_cleanup() when
+ * this function fails.
+ */
+int vigs_output_init(struct vigs_device *vigs_dev)
+{
+ struct vigs_output *vigs_output;
+ int ret;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ vigs_output = kzalloc(sizeof(*vigs_output), GFP_KERNEL);
+
+ if (!vigs_output) {
+ return -ENOMEM;
+ }
+
+ ret = drm_connector_init(vigs_dev->drm_dev,
+ &vigs_output->connector,
+ &vigs_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+
+ if (ret != 0) {
+ kfree(vigs_output);
+ return ret;
+ }
+
+ ret = drm_encoder_init(vigs_dev->drm_dev,
+ &vigs_output->encoder,
+ &vigs_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+
+ if (ret != 0) {
+ /*
+ * KMS subsystem will delete 'vigs_output'
+ */
+
+ return ret;
+ }
+
+ /*
+ * We only have a single CRTC.
+ */
+ vigs_output->encoder.possible_crtcs = (1 << 0);
+
+ ret = drm_mode_connector_attach_encoder(&vigs_output->connector,
+ &vigs_output->encoder);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ drm_encoder_helper_add(&vigs_output->encoder, &vigs_encoder_helper_funcs);
+
+ drm_connector_helper_add(&vigs_output->connector, &vigs_connector_helper_funcs);
+
+ ret = drm_sysfs_connector_add(&vigs_output->connector);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Returns the display DPI (stored as DPI*10; e.g. 3160 == 316.0 DPI).
+ *
+ * Built-in kernels parse a "dpi=NNNN" token from the kernel command line;
+ * out-of-range or missing values fall back to DPI_DEF_VALUE. Modules
+ * always get the default.
+ */
+int vigs_output_get_dpi(void)
+{
+    int dpi = DPI_DEF_VALUE;
+#ifndef MODULE
+    char *str;
+    char dpi_info[16];
+
+    str = strstr(saved_command_line, "dpi=");
+
+    if (str != NULL) {
+        str += 4;
+        strncpy(dpi_info, str, 4);
+        /*
+         * strncpy() does not NUL-terminate when the source has 4 or more
+         * characters; terminate explicitly so vigs_atoi() never reads
+         * uninitialized stack bytes past the copied digits.
+         */
+        dpi_info[4] = '\0';
+        dpi = vigs_atoi(dpi_info);
+        if ((dpi < DPI_MIN_VALUE) || (dpi > DPI_MAX_VALUE)) {
+            dpi = DPI_DEF_VALUE;
+        }
+    }
+#endif
+    return dpi;
+}
+
+/*
+ * Convert a width in pixels to physical millimeters, rounded to nearest.
+ * 'dpi' appears to be stored as DPI*10 (see DPI_DEF_VALUE), hence the
+ * 2540 (= 25.4 mm/inch * 100) scale and the final /10.
+ */
+int vigs_output_get_phys_width(int dpi, u32 width)
+{
+    u32 tenth_mm = (width * 2540) / dpi;
+
+    return (tenth_mm + 5) / 10;
+}
+
+/*
+ * Convert a height in pixels to physical millimeters, rounded to nearest;
+ * same scaling as vigs_output_get_phys_width().
+ */
+int vigs_output_get_phys_height(int dpi, u32 height)
+{
+    u32 tenth_mm = (height * 2540) / dpi;
+
+    return (tenth_mm + 5) / 10;
+}
--- /dev/null
+#ifndef _VIGS_OUTPUT_H_
+#define _VIGS_OUTPUT_H_
+
+#include "drmP.h"
+
+struct vigs_device;
+
+/* Create and register the virtual connector/encoder pair for 'vigs_dev'. */
+int vigs_output_init(struct vigs_device *vigs_dev);
+
+/* DPI of the virtual display, apparently stored as DPI*10 (default 3160
+ * = 316.0 DPI); parsed from the "dpi=" kernel option on built-in kernels. */
+int vigs_output_get_dpi(void);
+
+/* Pixel-to-millimeter conversions at the given (DPI*10) density. */
+int vigs_output_get_phys_width(int dpi, u32 width);
+
+int vigs_output_get_phys_height(int dpi, u32 height);
+
+#endif
--- /dev/null
+#ifndef _VIGS_PROTOCOL_H_
+#define _VIGS_PROTOCOL_H_
+
+/*
+ * VIGS protocol is a request-response protocol.
+ *
+ * + Requests come one by one.
+ * + The response is written after the request.
+ */
+
+/*
+ * Bump this whenever protocol changes.
+ */
+#define VIGS_PROTOCOL_VERSION 9
+
+/*
+ * Exact-width scalar aliases used on the wire. NOTE(review): these rely
+ * on the usual kernel ABI sizes (short=16, int=32, long long=64 bits);
+ * the kernel's fixed-width types (u32 & co.) would make this explicit.
+ */
+typedef signed char vigsp_s8;
+typedef signed short vigsp_s16;
+typedef signed int vigsp_s32;
+typedef signed long long vigsp_s64;
+typedef unsigned char vigsp_u8;
+typedef unsigned short vigsp_u16;
+typedef unsigned int vigsp_u32;
+typedef unsigned long long vigsp_u64;
+
+/* Semantic aliases built on the scalars above. */
+typedef vigsp_u32 vigsp_bool;
+typedef vigsp_u32 vigsp_surface_id;
+typedef vigsp_s32 vigsp_offset;
+typedef vigsp_u32 vigsp_color;
+typedef vigsp_u64 vigsp_va;
+typedef vigsp_u32 vigsp_resource_id;
+
+/*
+ * Command opcodes; the numeric values are part of the host/guest ABI —
+ * do not renumber (bump VIGS_PROTOCOL_VERSION on any change).
+ */
+typedef enum
+{
+ vigsp_cmd_init = 0x0,
+ vigsp_cmd_reset = 0x1,
+ vigsp_cmd_exit = 0x2,
+ vigsp_cmd_create_surface = 0x3,
+ vigsp_cmd_destroy_surface = 0x4,
+ vigsp_cmd_set_root_surface = 0x5,
+ vigsp_cmd_copy = 0x6,
+ vigsp_cmd_solid_fill = 0x7,
+ vigsp_cmd_update_vram = 0x8,
+ vigsp_cmd_put_image = 0x9,
+ vigsp_cmd_get_image = 0xA,
+ vigsp_cmd_assign_resource = 0xB,
+ vigsp_cmd_destroy_resource = 0xC,
+} vigsp_cmd;
+
+/* Host execution status carried in every response header. */
+typedef enum
+{
+ /*
+ * Start from 0x1 to detect host failures on target.
+ */
+ vigsp_status_success = 0x1,
+ vigsp_status_bad_call = 0x2,
+ vigsp_status_exec_error = 0x3,
+} vigsp_status;
+
+/* Supported surface pixel formats (32bpp BGR layouts). */
+typedef enum
+{
+ vigsp_surface_bgrx8888 = 0x0,
+ vigsp_surface_bgra8888 = 0x1,
+} vigsp_surface_format;
+
+/* Kinds of X resources that can be bound to a surface. */
+typedef enum
+{
+ vigsp_resource_window = 0x0,
+ vigsp_resource_pixmap = 0x1,
+} vigsp_resource_type;
+
+#pragma pack(1)
+
+/*
+ * 'vram_offset' is both surface data offset
+ * and dirty flag. when it's < 0 it means surface data
+ * is not allocated on target or surface is not dirty.
+ * When it's >= 0 it means either surface data has been allocated
+ * or surface is dirty in case if data has been allocated before.
+ */
+/* Surface reference passed with commands; see 'vram_offset' note above. */
+struct vigsp_surface
+{
+ vigsp_surface_id id;
+ vigsp_offset vram_offset;
+};
+
+/* Position in pixels, origin at top-left. */
+struct vigsp_point
+{
+ vigsp_u32 x;
+ vigsp_u32 y;
+};
+
+/* Extent in pixels. */
+struct vigsp_size
+{
+ vigsp_u32 w;
+ vigsp_u32 h;
+};
+
+/* Axis-aligned rectangle: top-left corner plus extent. */
+struct vigsp_rect
+{
+ struct vigsp_point pos;
+ struct vigsp_size size;
+};
+
+/* One copy operation: 'size' pixels moved from 'from' to 'to'. */
+struct vigsp_copy
+{
+ struct vigsp_point from;
+ struct vigsp_point to;
+ struct vigsp_size size;
+};
+
+/* Every request starts with this header, followed by the command payload. */
+struct vigsp_cmd_request_header
+{
+ vigsp_cmd cmd;
+
+ /*
+ * Response offset counting after request header.
+ */
+ vigsp_u32 response_offset;
+};
+
+/* Every response starts with this header, followed by the response payload. */
+struct vigsp_cmd_response_header
+{
+ vigsp_status status;
+};
+
+/*
+ * cmd_init
+ *
+ * First command to be sent, client passes its protocol version
+ * and receives server's in response. If 'client_version' doesn't match
+ * 'server_version' then initialization is considered failed. This
+ * is typically called on target's DRM driver load.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_init_request
+{
+ vigsp_u32 client_version;
+};
+
+struct vigsp_cmd_init_response
+{
+ vigsp_u32 server_version;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_reset
+ *
+ * Destroys all surfaces but root surface, this typically happens
+ * on DRM's lastclose.
+ *
+ * @{
+ * @}
+ */
+
+/*
+ * cmd_exit
+ *
+ * Destroys all surfaces and transitions into uninitialized state, this
+ * typically happens when target's DRM driver gets unloaded.
+ *
+ * @{
+ * @}
+ */
+
+/*
+ * cmd_create_surface
+ *
+ * Called for each surface created. Server returns 'id' of the surface,
+ * all further operations must be carried out using this id. 'id' is
+ * unique across whole target system, because there can be only one
+ * DRM master (like X.Org) on target and this master typically wants to
+ * share the surfaces with other processes.
+ *
+ * 'vram_offset' points to the surface data in VRAM, if any. If no surface data
+ * is provided then 'vram_offset' must be < 0.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_create_surface_request
+{
+ vigsp_u32 width;
+ vigsp_u32 height;
+ vigsp_u32 stride;
+ vigsp_surface_format format;
+ vigsp_offset vram_offset;
+};
+
+struct vigsp_cmd_create_surface_response
+{
+ vigsp_surface_id id;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_destroy_surface
+ *
+ * Destroys the surface identified by 'id'. Surface 'id' may not be used
+ * after this call and its data can be assigned to some other surface right
+ * after this call.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_destroy_surface_request
+{
+ vigsp_surface_id id;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_set_root_surface
+ *
+ * Sets surface identified by 'id' as new root surface. Root surface is the
+ * one that's displayed on screen. Root surface must have data.
+ *
+ * Pass 0 as id in order to reset the root surface.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_set_root_surface_request
+{
+ vigsp_surface_id id;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_copy
+ *
+ * Copies parts of surface 'src' to
+ * surface 'dst'.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_copy_request
+{
+ struct vigsp_surface src;
+ struct vigsp_surface dst;
+ vigsp_u32 num_entries;
+ struct vigsp_copy entries[0];
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_solid_fill
+ *
+ * Fills surface 'sfc' with color 'color' at 'entries'.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_solid_fill_request
+{
+ struct vigsp_surface sfc;
+ vigsp_color color;
+ vigsp_u32 num_entries;
+ struct vigsp_rect entries[0];
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_update_vram
+ *
+ * Updates 'sfc' data in vram.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_update_vram_request
+{
+ struct vigsp_surface sfc;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_put_image
+ *
+ * Puts image 'src_va' on surface 'sfc'.
+ * Host may detect page fault condition, in that case it'll
+ * set 'is_pf' to 1 in response, target then must fault in 'src_va'
+ * memory and repeat this command.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_put_image_request
+{
+ struct vigsp_surface sfc;
+ vigsp_va src_va;
+ vigsp_u32 src_stride;
+ struct vigsp_rect rect;
+};
+
+struct vigsp_cmd_put_image_response
+{
+ vigsp_bool is_pf;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_get_image
+ *
+ * Gets image 'dst_va' from surface 'sfc_id'.
+ * Host may detect page fault condition, in that case it'll
+ * set 'is_pf' to 1 in response, target then must fault in 'dst_va'
+ * memory and repeat this command.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_get_image_request
+{
+ vigsp_surface_id sfc_id;
+ vigsp_va dst_va;
+ vigsp_u32 dst_stride;
+ struct vigsp_rect rect;
+};
+
+struct vigsp_cmd_get_image_response
+{
+ vigsp_bool is_pf;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_assign_resource
+ *
+ * Assign resource 'res_id' to refer to surface 'sfc_id'.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_assign_resource_request
+{
+ vigsp_resource_id res_id;
+ vigsp_resource_type res_type;
+ vigsp_surface_id sfc_id;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_destroy_resource
+ *
+ * Destroys resource 'id'.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_destroy_resource_request
+{
+ vigsp_resource_id id;
+};
+
+/*
+ * @}
+ */
+
+#pragma pack()
+
+#endif
--- /dev/null
+/*
+ * vigs_drm.h
+ */
+
+#ifndef _VIGS_DRM_H_
+#define _VIGS_DRM_H_
+
+/*
+ * Bump this whenever driver interface changes.
+ */
+#define DRM_VIGS_DRIVER_VERSION 3
+
+#define DRM_VIGS_GEM_DOMAIN_VRAM 0
+#define DRM_VIGS_GEM_DOMAIN_RAM 1
+
+/* out: host protocol version (compare with VIGS_PROTOCOL_VERSION). */
+struct drm_vigs_get_protocol_version
+{
+ uint32_t version;
+};
+
+/* in: domain (VRAM/RAM) and size; out: GEM handle and offset in the domain. */
+struct drm_vigs_gem_create
+{
+ uint32_t domain;
+ uint32_t size;
+ uint32_t handle;
+ uint32_t domain_offset;
+};
+
+/* in: GEM handle; out: fake mmap offset to pass to mmap(2). */
+struct drm_vigs_gem_mmap
+{
+ uint32_t handle;
+ uint64_t offset;
+};
+
+/* in: GEM handle; out: current domain and offset within it. */
+struct drm_vigs_gem_info
+{
+ uint32_t handle;
+ uint32_t domain;
+ uint32_t domain_offset;
+};
+
+/* out: index of the user slot entered — TODO confirm against the driver. */
+struct drm_vigs_user_enter
+{
+ uint32_t index;
+};
+
+/* in: index of the user slot to leave. */
+struct drm_vigs_user_leave
+{
+ uint32_t index;
+};
+
+/* in: DRM framebuffer id; out: backing surface id. */
+struct drm_vigs_fb_info
+{
+ uint32_t fb_id;
+ uint32_t sfc_id;
+};
+
+#define DRM_VIGS_GET_PROTOCOL_VERSION 0x00
+#define DRM_VIGS_GEM_CREATE 0x01
+#define DRM_VIGS_GEM_MMAP 0x02
+#define DRM_VIGS_GEM_INFO 0x03
+#define DRM_VIGS_USER_ENTER 0x04
+#define DRM_VIGS_USER_LEAVE 0x05
+#define DRM_VIGS_FB_INFO 0x06
+
+#define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
+ DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
+#define DRM_IOCTL_VIGS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_GEM_CREATE, struct drm_vigs_gem_create)
+#define DRM_IOCTL_VIGS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_GEM_MMAP, struct drm_vigs_gem_mmap)
+#define DRM_IOCTL_VIGS_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_GEM_INFO, struct drm_vigs_gem_info)
+#define DRM_IOCTL_VIGS_USER_ENTER DRM_IOR(DRM_COMMAND_BASE + \
+ DRM_VIGS_USER_ENTER, struct drm_vigs_user_enter)
+#define DRM_IOCTL_VIGS_USER_LEAVE DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_USER_LEAVE, struct drm_vigs_user_leave)
+#define DRM_IOCTL_VIGS_FB_INFO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_FB_INFO, struct drm_vigs_fb_info)
+
+#endif