Backmerging to get the latest upstream changes.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
* the EDID then we'll just return 0.
*/
- base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!base_block)
return 0;
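For context: the buffer is handed to the EDID parsing below even when the DDC read returns short, so with kmalloc() the unread tail held stale heap data that could end up parsed and logged. A minimal sketch of the guarantee kzalloc() adds (read_ddc_block() is a hypothetical stand-in for the actual read helper):

	base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
	if (!base_block)
		return 0;

	if (read_ddc_block(base_block, EDID_LENGTH) < EDID_LENGTH) {
		/* short read: the tail of base_block is guaranteed to be
		 * zeroes, not leftover kernel heap contents */
	}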
*
* @lru: The LRU to scan
* @nr_to_scan: The number of pages to try to reclaim
+ * @remaining: The number of pages left to reclaim; must be initialized by the caller
* @shrink: Callback to try to shrink/reclaim the object.
*/
unsigned long
-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj))
{
struct drm_gem_lru still_in_lru;
* hit shrinker in response to trying to get backing pages
* for this obj (i.e. while its lock is already held)
*/
- if (!dma_resv_trylock(obj->resv))
+ if (!dma_resv_trylock(obj->resv)) {
+ *remaining += obj->size >> PAGE_SHIFT;
goto tail;
+ }
if (shrink(obj)) {
freed += obj->size >> PAGE_SHIFT;
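A usage sketch of the extended drm_gem_lru_scan() contract (the evict() callback and my_driver_evict() helper are illustrative; *remaining must be zero-initialized by the caller, since the scan only ever adds to it):

	static bool evict(struct drm_gem_object *obj)
	{
		/* reclaim the object's backing pages; return true on success */
		return my_driver_evict(obj);	/* hypothetical helper */
	}

	unsigned long remaining = 0;
	unsigned long freed;

	freed = drm_gem_lru_scan(lru, 32, &remaining, evict);
	/* freed:     pages actually reclaimed
	 * remaining: pages skipped because dma_resv_trylock() failed,
	 *            i.e. still potentially reclaimable on a later pass */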
int ret;
if (obj->import_attach) {
- /* Drop the reference drm_gem_mmap_obj() acquired.*/
- drm_gem_object_put(obj);
vma->vm_private_data = NULL;
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired. */
+ if (!ret)
+ drm_gem_object_put(obj);
- return dma_buf_mmap(obj->dma_buf, vma, 0);
+ return ret;
}
ret = drm_gem_shmem_get_pages(shmem);
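Why the put became conditional, in a simplified view of the mainline caller (drm_gem_mmap_obj()):

	/* drm_gem_mmap_obj(), simplified: */
	drm_gem_object_get(obj);		/* ref for this mapping      */
	vma->vm_private_data = obj;
	ret = obj->funcs->mmap(obj, vma);	/* -> the shmem hunk above   */
	if (ret)
		drm_gem_object_put(obj);	/* caller unwinds on failure */

On success the dma-buf exporter owns the mapping and vm_close() will never drop the reference (vm_private_data was cleared), so the helper must put it itself; on failure the caller already puts it, and the old code's unconditional put underflowed the refcount.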
priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
+ writel_relaxed(0x42020,
+ priv->io_base + _REG(VPP_DUMMY_DATA));
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
bool (*shrink)(struct drm_gem_object *obj);
bool cond;
unsigned long freed;
+ unsigned long remaining;
} stages[] = {
/* Stages of progressively more aggressive/expensive reclaim: */
{ &priv->lru.dontneed, purge, true },
};
long nr = sc->nr_to_scan;
unsigned long freed = 0;
+ unsigned long remaining = 0;
for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
if (!stages[i].cond)
continue;
stages[i].freed =
- drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+ drm_gem_lru_scan(stages[i].lru, nr,
+ &stages[i].remaining,
+ stages[i].shrink);
nr -= stages[i].freed;
freed += stages[i].freed;
+ remaining += stages[i].remaining;
}
if (freed) {
stages[3].freed);
}
- return (freed > 0) ? freed : SHRINK_STOP;
+ return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}
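A worked view of the changed stop condition (the numbers are illustrative):

	/* freed = 10, remaining = 50: return 10 -- objects were skipped
	 *     only because of lock contention, so the core may usefully
	 *     call the shrinker again.
	 * freed = 10, remaining =  0: SHRINK_STOP -- everything that was
	 *     reclaimable has been reclaimed; retrying cannot help.
	 * freed =  0, remaining =  0: SHRINK_STOP -- nothing to do. */

Previously a non-zero freed count was returned even when nothing reclaimable remained, which could keep the shrinker rescanning an exhausted LRU.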
#ifdef CONFIG_DEBUG_FS
NULL,
};
unsigned idx, unmapped = 0;
+ unsigned long remaining = 0;
for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
unmapped += drm_gem_lru_scan(lrus[idx],
vmap_shrink_limit - unmapped,
+ &remaining,
vmap_shrink);
}
if (pm_runtime_active(pfdev->dev))
mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
- pm_runtime_put_sync_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
}
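Dropping the _sync avoids paying for a synchronous runtime suspend on the MMU flush path; the two puts differ only in when the suspend may happen (standard runtime-PM behavior):

	pm_runtime_put_sync_autosuspend(dev);	/* may suspend the device right
						 * now, in the caller's context */

	pm_runtime_put_autosuspend(dev);	/* just drops the usage count and
						 * lets the autosuspend timer do
						 * the actual suspend later */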
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
/* drm_vblank_init calls kcalloc, which can fail */
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret)
- goto cleanup_mode_config;
+ goto unbind_all;
/* Remove early framebuffers (ie. simplefb) */
ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
if (ret)
- goto cleanup_mode_config;
+ goto unbind_all;
sun4i_framebuffer_init(drm);
finish_poll:
drm_kms_helper_poll_fini(drm);
+unbind_all:
+ component_unbind_all(dev, NULL);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
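For reference, the bind/unwind pairing this restores (a simplified view of sun4i_drv_bind(), assuming the usual mainline ordering):

	drm_mode_config_init(drm);		/* undone at cleanup_mode_config */
	ret = component_bind_all(dev, drm);	/* undone at unbind_all          */
	if (ret)
		goto cleanup_mode_config;
	ret = drm_vblank_init(drm, ...);	/* anything failing from here on */
	if (ret)
		goto unbind_all;		/* must also unbind components   */

Jumping to cleanup_mode_config after a successful component_bind_all() leaked the bound components, which is what the relabelled gotos fix.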
struct ttm_buffer_object *bo = res->bo;
uint32_t num_pages;
- if (!bo)
+ if (!bo || bo->resource != res)
continue;
num_pages = PFN_UP(bo->base.size);
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
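Both hunks apply the same DMA-API rule: sync against the struct device the sgtable was mapped for. A virtio_device does no DMA itself; the transport's parent device (e.g. the PCI function) owns the mapping. A sketch of the required pairing (assuming the table was mapped against the parent at object creation, as virtio-gpu does):

	/* map and sync must use the same struct device */
	ret = dma_map_sgtable(vgdev->vdev->dev.parent, bo->base.sgt,
			      DMA_TO_DEVICE, 0);

	dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, bo->base.sgt,
				    DMA_TO_DEVICE);

Syncing &vgdev->vdev->dev instead operated on a device the buffer was never mapped for.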
if (rc)
return rc;
- if (pci_enable_device(dp) < 0) {
+ rc = pci_enable_device(dp);
+ if (rc < 0) {
dev_err(&dp->dev, "Cannot enable PCI device\n");
goto err_out;
}
- if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+ if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
+ rc = -ENODEV;
goto err_disable;
+ }
addr = pci_resource_start(dp, 0);
- if (addr == 0)
+ if (addr == 0) {
+ rc = -ENODEV;
goto err_disable;
+ }
p = framebuffer_alloc(0, &dp->dev);
if (p == NULL) {
init_chips(p, addr);
- if (register_framebuffer(p) < 0) {
+ rc = register_framebuffer(p);
+ if (rc < 0) {
dev_err(&dp->dev, "C&T 65550 framebuffer failed to register\n");
goto err_unmap;
}
struct inode *inode,
struct file *file)
{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
file->f_mapping->a_ops = &fb_deferred_io_aops;
+ fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
-void fb_deferred_io_release(struct fb_info *info)
+static void fb_deferred_io_lastclose(struct fb_info *info)
{
- struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page;
int i;
- BUG_ON(!fbdefio);
cancel_delayed_work_sync(&info->deferred_work);
/* clear out the mapping that we setup */
page->mapping = NULL;
}
}
+
+void fb_deferred_io_release(struct fb_info *info)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
+ if (!--fbdefio->open_count)
+ fb_deferred_io_lastclose(info);
+}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
- fb_deferred_io_release(info);
+ fb_deferred_io_lastclose(info);
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
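A lifecycle sketch of the new counting, for two opens of the same fbdev node (open_count is protected by the fb_info lock, per the new field's comment):

	fb_deferred_io_open(info, inode, file1);	/* open_count 0 -> 1 */
	fb_deferred_io_open(info, inode, file2);	/* open_count 1 -> 2 */

	fb_deferred_io_release(info);	/* 2 -> 1: mappings stay intact  */
	fb_deferred_io_release(info);	/* 1 -> 0: lastclose cancels the
					 * deferred work and clears the
					 * page->mapping setup            */

fb_deferred_io_cleanup() calls the lastclose path directly because at teardown the full cleanup must happen regardless of the counter's value.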
void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj));
#endif /* __DRM_GEM_H__ */
/* delay between mkwrite and deferred handler */
unsigned long delay;
bool sort_pagereflist; /* sort pagelist by offset */
+ int open_count; /* number of opened files; protected by fb_info lock */
struct mutex lock; /* mutex that protects the pageref list */
struct list_head pagereflist; /* list of pagerefs for touched pages */
/* callback */