* and with small amount of VRAM memory it's possible that
* GEM pin will be failing for some time, thus, framebuffer pin
* will be failing. This is unavoidable with current TTM design,
- * even though ttm_bo_validate has 'no_wait_reserve' parameter it's
- * always assumed that it's true, thus, if someone is intensively
+ * thus, if someone intensively
* reserves/unreserves GEMs then ttm_bo_validate can fail even if there
* is free space in a placement. Even worse, ttm_bo_validate fails with
* ENOMEM so it's not possible to tell if it's a temporary failure due
* is relatively safe since we only pin GEMs on pageflip and user
* should have started the VM with VRAM size equal to at least 3 frames,
* thus, 2 frame will always be free and we can always pin 1 frame.
- *
- * Also, 'no_wait_reserve' parameter is completely removed in future
- * kernels with this commit:
- * https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=97a875cbdf89a4638eea57c2b456c7cc4e3e8b21
*/
cpu_relax();
goto retry;
}
static bool vigs_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
DRM_DEBUG_KMS("enter\n");
static int vigs_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
unsigned long flags;
struct vigs_device *vigs_dev = crtc->dev->dev_private;
bool need_gpu_update = vigs_surface_need_gpu_update(vigs_sfc);
if (!vigs_sfc->is_gpu_dirty && need_gpu_update) {
- DRM_INFO("vram_to_gpu: 0x%llX\n", bo->addr_space_offset);
+ DRM_INFO("vram_to_gpu: 0x%llX\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
vigs_comm_update_gpu(vigs_dev->comm,
vigs_sfc->id,
vigs_sfc->width,
vigs_sfc->height,
vigs_gem_offset(vigs_gem));
} else {
- DRM_INFO("vram_to_gpu: 0x%llX (no-op)\n", bo->addr_space_offset);
+ DRM_INFO("vram_to_gpu: 0x%llX (no-op)\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
}
vigs_sfc->is_gpu_dirty = false;
struct vigs_surface *vigs_sfc = vigs_gem_to_vigs_surface(vigs_gem);
if (vigs_surface_need_vram_update(vigs_sfc)) {
- DRM_DEBUG_DRIVER("0x%llX\n", bo->addr_space_offset);
+ DRM_DEBUG_DRIVER("0x%llX\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
vigs_comm_update_vram(vigs_dev->comm,
vigs_sfc->id,
new_offset);
} else {
- DRM_DEBUG_DRIVER("0x%llX (no-op)\n", bo->addr_space_offset);
+ DRM_DEBUG_DRIVER("0x%llX (no-op)\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
}
}
struct vigs_surface *sfc,
vigsp_surface_id* id)
{
- int ret, tmp_id = 0;
+ int ret;
mutex_lock(&vigs_dev->surface_idr_mutex);
- do {
- if (unlikely(idr_pre_get(&vigs_dev->surface_idr, GFP_KERNEL) == 0)) {
- mutex_unlock(&vigs_dev->surface_idr_mutex);
- return -ENOMEM;
- }
+ ret = idr_alloc(&vigs_dev->surface_idr, sfc, 1, 0, GFP_KERNEL);
- ret = idr_get_new_above(&vigs_dev->surface_idr, sfc, 1, &tmp_id);
- } while (ret == -EAGAIN);
+ mutex_unlock(&vigs_dev->surface_idr_mutex);
- *id = tmp_id;
+ if (ret < 0) {
+ return ret;
+ }
- mutex_unlock(&vigs_dev->surface_idr_mutex);
+ *id = ret;
- return ret;
+ return 0;
}
void vigs_device_remove_surface(struct vigs_device *vigs_dev,
#include "vigs_file.h"
#include "vigs_plane.h"
#include "vigs_mman.h"
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
#include <linux/module.h>
#include <drm/vigs_drm.h>
#define DRIVER_MAJOR DRM_VIGS_DRIVER_VERSION
#define DRIVER_MINOR 0
-static struct pci_device_id vigs_pci_table[] __devinitdata =
+static struct pci_device_id vigs_pci_table[] =
{
{
.vendor = PCI_VENDOR_ID_VIGS,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.poll = drm_poll,
- .fasync = drm_fasync,
.mmap = vigs_device_mmap,
.read = drm_read
};
file_priv->driver_priv = vigs_file;
- if (unlikely(vigs_dev->mman->bo_dev.dev_mapping == NULL)) {
- vigs_dev->mman->bo_dev.dev_mapping =
- file_priv->filp->f_path.dentry->d_inode->i_mapping;
- }
+ vigs_dev->mman->bo_dev.dev_mapping = dev->dev_mapping;
return 0;
}
.minor = DRIVER_MINOR,
};
-static int __devinit vigs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int vigs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &vigs_drm_driver);
}
.name = DRIVER_NAME,
.id_table = vigs_pci_table,
.probe = vigs_pci_probe,
- .remove = __devexit_p(vigs_pci_remove),
+ .remove = vigs_pci_remove,
};
int vigs_driver_register(void)
return -EINVAL;
}
- buffer->base.new_sync_obj_arg = NULL;
buffer->base.bo = &sfc->gem.bo;
buffer->cmd = cmd;
buffer->which = which;
struct drm_gem_object *gem;
struct vigs_gem_object *vigs_gem;
struct vigs_execbuffer *execbuffer;
+ struct ww_acquire_ctx ticket;
struct list_head list;
struct vigs_validate_buffer *buffers;
int num_buffers = 0;
if (list_empty(&list)) {
vigs_comm_exec(vigs_dev->comm, execbuffer);
} else {
- ret = ttm_eu_reserve_buffers(&list);
+ ret = ttm_eu_reserve_buffers(&ticket, &list);
if (ret != 0) {
- ttm_eu_backoff_reservation(&list);
goto out3;
}
ret = vigs_fence_create(vigs_dev->fenceman, &fence);
if (ret != 0) {
- ttm_eu_backoff_reservation(&list);
+ ttm_eu_backoff_reservation(&ticket, &list);
goto out3;
}
vigs_comm_exec(vigs_dev->comm, execbuffer);
- ttm_eu_fence_buffer_objects(&list, fence);
+ ttm_eu_fence_buffer_objects(&ticket, &list, fence);
if (sync) {
vigs_fence_wait(fence, false);
* @{
*/
+static bool vigs_fbdev_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb) {
+ crtcs_bound++;
+ }
+
+ if (crtc->fb == fb_helper->fb) {
+ bound++;
+ }
+ }
+
+ if (bound < crtcs_bound) {
+ return false;
+ }
+
+ return true;
+}
+
static int vigs_fbdev_setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *fbi)
{
return 0;
}
+ /*
+ * The driver really shouldn't advertise pseudo/directcolor
+ * visuals if it can't deal with the palette.
+ */
+ if (WARN_ON(!fb_helper->funcs->gamma_set ||
+ !fb_helper->funcs->gamma_get)) {
+ return -EINVAL;
+ }
+
pindex = regno;
if (fb->bits_per_pixel == 16) {
static int vigs_fbdev_setcmap(struct fb_cmap *cmap, struct fb_info *fbi)
{
struct drm_fb_helper *fb_helper = fbi->par;
+ struct drm_device *dev = fb_helper->dev;
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, j, ret = 0;
int start;
+ drm_modeset_lock_all(dev);
+ if (!vigs_fbdev_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return -EBUSY;
+ }
+
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
ret = vigs_fbdev_setcolreg(crtc, hred, hgreen, hblue, start++, fbi);
if (ret != 0) {
- return ret;
+ goto out;
}
}
- crtc_funcs->load_lut(crtc);
+ if (crtc_funcs->load_lut) {
+ crtc_funcs->load_lut(crtc);
+ }
}
+ out:
+ drm_modeset_unlock_all(dev);
return ret;
}
int i, j;
/*
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
+ */
+ if (oops_in_progress) {
+ return;
+ }
+
+ /*
* For each CRTC in this fb, turn the connectors on/off.
*/
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (!vigs_fbdev_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return;
+ }
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
/* Walk the connectors & encoders on this fb turning them on/off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
- drm_helper_connector_dpms(connector, dpms_mode);
- drm_connector_property_set_value(connector,
+ connector->funcs->dpms(connector, dpms_mode);
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
vigs_crtc->in_fb_blank = false;
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
/*
void vigs_fbdev_destroy(struct vigs_fbdev *vigs_fbdev)
{
struct fb_info *fbi = vigs_fbdev->base.fbdev;
+ struct drm_framebuffer *fb;
DRM_DEBUG_KMS("enter\n");
framebuffer_release(fbi);
}
+ fb = vigs_fbdev->base.fb;
+
drm_fb_helper_fini(&vigs_fbdev->base);
if (vigs_fbdev->kptr) {
iounmap(vigs_fbdev->kptr);
}
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_remove(fb);
+
kfree(vigs_fbdev);
}
{
DRM_DEBUG_KMS("enter\n");
+ drm_modeset_lock_all(vigs_fbdev->base.dev);
drm_fb_helper_restore_fbdev_mode(&vigs_fbdev->base);
+ drm_modeset_unlock_all(vigs_fbdev->base.dev);
}
struct vigs_user_fence *user_fence = vigs_fence_to_vigs_user_fence(fence);
vigs_fence_cleanup(&user_fence->fence);
- kfree(user_fence);
+ ttm_base_object_kfree(user_fence, base);
}
static void vigs_fence_release_locked(struct kref *kref)
}
ret = ttm_bo_init(&vigs_dev->mman->bo_dev, &vigs_gem->bo, size, bo_type,
- &placement, 0, 0,
- false, NULL, 0,
+ &placement, 0,
+ false, NULL, 0, NULL,
&vigs_gem_bo_destroy);
if (ret != 0) {
placement.num_placement = 1;
placement.num_busy_placement = 1;
- ret = ttm_bo_validate(&vigs_gem->bo, &placement, false, true, false);
+ ret = ttm_bo_validate(&vigs_gem->bo, &placement, false, false);
if (ret != 0) {
DRM_ERROR("GEM pin failed (type = %u, off = 0x%llX, sz = %lu)\n",
placement.num_placement = 2;
placement.num_busy_placement = 2;
- ret = ttm_bo_validate(&vigs_gem->bo, &placement, false, true, false);
+ ret = ttm_bo_validate(&vigs_gem->bo, &placement, false, false);
if (ret != 0) {
DRM_ERROR("GEM unpin failed (type = %u, off = 0x%llX, sz = %lu)\n",
struct drm_gem_object *gem;
struct vigs_gem_object *vigs_gem;
struct mm_struct *mm = current->mm;
- unsigned long address;
+ unsigned long address, unused;
gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
* 'do_mmap' takes an offset in bytes and our
* offset is 64-bit (since it's TTM offset) and it can't fit into 32-bit
* variable.
- * For this to work we had to export
- * 'do_mmap_pgoff'. 'do_mmap_pgoff' was exported prior to
- * 3.4 and it's available after 3.5, but for some reason it's
- * static in 3.4.
*/
vigs_dev->track_gem_access = args->track_access;
address = do_mmap_pgoff(file_priv->filp, 0, vigs_gem_size(vigs_gem),
PROT_READ | PROT_WRITE,
MAP_SHARED,
- vigs_gem_mmap_offset(vigs_gem) >> PAGE_SHIFT);
+ vigs_gem_mmap_offset(vigs_gem) >> PAGE_SHIFT,
+ &unused);
vigs_dev->track_gem_access = false;
up_write(&mm->mmap_sem);
*/
static inline u64 vigs_gem_mmap_offset(struct vigs_gem_object *vigs_gem)
{
- return vigs_gem->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&vigs_gem->bo.vma_node);
}
static inline void vigs_gem_reserve(struct vigs_gem_object *vigs_gem)
{
struct ttm_dma_tt *dma_tt = (void*)tt;
+ ttm_dma_tt_fini(dma_tt);
kfree(dma_tt);
}
struct page *dummy_read_page)
{
struct ttm_dma_tt *dma_tt;
+ int ret;
dma_tt = kzalloc(sizeof(struct ttm_dma_tt), GFP_KERNEL);
dma_tt->ttm.func = &vigs_ttm_backend_func;
+ ret = ttm_dma_tt_init(dma_tt, bo_dev, size, page_flags,
+ dummy_read_page);
+
+ if (ret != 0) {
+ DRM_ERROR("ttm_dma_tt_init failed: %d\n", ret);
+ kfree(dma_tt);
+ return NULL;
+ }
+
return &dma_tt->ttm;
}
static int vigs_ttm_move(struct ttm_buffer_object *bo,
bool evict,
bool interruptible,
- bool no_wait_reserve,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
return 0;
} else {
- return ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
}
return 0;
}
-static bool vigs_ttm_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool vigs_ttm_sync_obj_signaled(void *sync_obj)
{
return vigs_fence_signaled((struct vigs_fence*)sync_obj);
}
static int vigs_ttm_sync_obj_wait(void *sync_obj,
- void *sync_arg,
bool lazy,
bool interruptible)
{
return vigs_fence_wait((struct vigs_fence*)sync_obj, interruptible);
}
-static int vigs_ttm_sync_obj_flush(void *sync_obj,
- void *sync_arg)
+static int vigs_ttm_sync_obj_flush(void *sync_obj)
{
return 0;
}
placement.num_placement = 1;
placement.num_busy_placement = 1;
- ret = ttm_bo_validate(bo, &placement, false, true, false);
+ ret = ttm_bo_validate(bo, &placement, false, false);
if (ret != 0) {
- DRM_ERROR("movement failed for 0x%llX\n", bo->addr_space_offset);
+ DRM_ERROR("movement failed for 0x%llX\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
return ret;
}
}
static bool vigs_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
DRM_DEBUG_KMS("enter\n");
struct vigs_plane *vigs_plane;
int ret;
- mutex_lock(&drm_dev->mode_config.mutex);
+ drm_modeset_lock_all(drm_dev);
obj = drm_mode_object_find(drm_dev,
args->plane_id,
ret = 0;
out:
- mutex_unlock(&drm_dev->mode_config.mutex);
+ drm_modeset_unlock_all(drm_dev);
return ret;
}