Merge drm/drm-fixes into drm-misc-fixes
author		Thomas Zimmermann <tzimmermann@suse.de>	Mon, 13 Mar 2023 09:14:05 +0000 (10:14 +0100)
committer	Thomas Zimmermann <tzimmermann@suse.de>	Mon, 13 Mar 2023 09:14:05 +0000 (10:14 +0100)
Backmerging to get latest upstream.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
13 files changed:
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/meson/meson_vpp.c
drivers/gpu/drm/msm/msm_gem_shrinker.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/video/fbdev/chipsfb.c
drivers/video/fbdev/core/fb_defio.c
include/drm/drm_gem.h
include/linux/fb.h

drivers/gpu/drm/drm_edid.c
index 3d0a4da..261a62e 100644
@@ -2796,7 +2796,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
         * the EDID then we'll just return 0.
         */
 
-       base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+       base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
        if (!base_block)
                return 0;
 
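A reading of the one-liner above (editorial gloss, not from the commit message): drm_edid_get_panel_id() reads the base EDID block into base_block and still inspects the buffer when the DDC read comes back short or fails, so with kmalloc() the unwritten bytes would be stale kernel memory. Abridged shape of the function, helper names as in upstream drm_edid.c:

        enum edid_block_status status;
        u32 panel_id = 0;
        void *base_block;

        base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
        if (!base_block)
                return 0;

        /* On a short or failed read the unwritten tail is now a defined
         * zero instead of whatever the allocation previously held.
         */
        status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter);

        if (edid_block_status_valid(status, edid_block_tag(base_block)))
                panel_id = edid_extract_panel_id(base_block);

        kfree(base_block);
        return panel_id;
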
drivers/gpu/drm/drm_gem.c
index 7a3cb08..a5d392f 100644
@@ -1388,10 +1388,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
  *
  * @lru: The LRU to scan
  * @nr_to_scan: The number of pages to try to reclaim
+ * @remaining: The number of pages left to reclaim, should be initialized by caller
  * @shrink: Callback to try to shrink/reclaim the object.
  */
 unsigned long
-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+                unsigned int nr_to_scan,
+                unsigned long *remaining,
                 bool (*shrink)(struct drm_gem_object *obj))
 {
        struct drm_gem_lru still_in_lru;
@@ -1430,8 +1433,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
                 * hit shrinker in response to trying to get backing pages
                 * for this obj (ie. while its lock is already held)
                 */
-               if (!dma_resv_trylock(obj->resv))
+               if (!dma_resv_trylock(obj->resv)) {
+                       *remaining += obj->size >> PAGE_SHIFT;
                        goto tail;
+               }
 
                if (shrink(obj)) {
                        freed += obj->size >> PAGE_SHIFT;
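
Caller contract implied by the new parameter (a sketch mirroring the msm hunks below; lru, nr_to_scan and shrink_cb stand in for the caller's state): *remaining starts at zero in the caller, and drm_gem_lru_scan() only ever adds to it the page counts of objects skipped because dma_resv_trylock() failed, letting a shrinker tell "nothing reclaimable" apart from "everything was contended this pass":

        unsigned long freed, remaining = 0;

        /* remaining must be initialized; the scan only accumulates */
        freed = drm_gem_lru_scan(lru, nr_to_scan, &remaining, shrink_cb);

        /* report progress only while contended objects might still
         * succeed on a retry
         */
        return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
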
drivers/gpu/drm/drm_gem_shmem_helper.c
index 75185a9..2b2163c 100644
@@ -619,11 +619,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
        int ret;
 
        if (obj->import_attach) {
-               /* Drop the reference drm_gem_mmap_obj() acquired.*/
-               drm_gem_object_put(obj);
                vma->vm_private_data = NULL;
+               ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+               /* Drop the reference drm_gem_mmap_obj() acquired.*/
+               if (!ret)
+                       drm_gem_object_put(obj);
 
-               return dma_buf_mmap(obj->dma_buf, vma, 0);
+               return ret;
        }
 
        ret = drm_gem_shmem_get_pages(shmem);
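
Why the put moved (a reading consistent with drm_gem_mmap_obj() in drm_gem.c): the caller takes the reference before invoking obj->funcs->mmap() and drops it itself when that callback fails, so dropping it here on the dma_buf_mmap() failure path as well was a double put. Caller side, abridged:

        /* drm_gem_mmap_obj(), abridged */
        drm_gem_object_get(obj);
        vma->vm_private_data = obj;

        ret = obj->funcs->mmap(obj, vma);       /* -> drm_gem_shmem_mmap() */
        if (ret)
                goto err_drm_gem_object_put;    /* caller drops the ref on error */
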
drivers/gpu/drm/meson/meson_vpp.c
index 1548376..5df1957 100644
@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
                               priv->io_base + _REG(VPP_DOLBY_CTRL));
                writel_relaxed(0x1020080,
                                priv->io_base + _REG(VPP_DUMMY_DATA1));
+               writel_relaxed(0x42020,
+                               priv->io_base + _REG(VPP_DUMMY_DATA));
        } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
                writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
 
drivers/gpu/drm/msm/msm_gem_shrinker.c
index 051bdbc..f38296a 100644
@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                bool (*shrink)(struct drm_gem_object *obj);
                bool cond;
                unsigned long freed;
+               unsigned long remaining;
        } stages[] = {
                /* Stages of progressively more aggressive/expensive reclaim: */
                { &priv->lru.dontneed, purge,        true },
@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        };
        long nr = sc->nr_to_scan;
        unsigned long freed = 0;
+       unsigned long remaining = 0;
 
        for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
                if (!stages[i].cond)
                        continue;
                stages[i].freed =
-                       drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+                       drm_gem_lru_scan(stages[i].lru, nr,
+                                       &stages[i].remaining,
+                                        stages[i].shrink);
                nr -= stages[i].freed;
                freed += stages[i].freed;
+               remaining += stages[i].remaining;
        }
 
        if (freed) {
@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                                     stages[3].freed);
        }
 
-       return (freed > 0) ? freed : SHRINK_STOP;
+       return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
                NULL,
        };
        unsigned idx, unmapped = 0;
+       unsigned long remaining = 0;
 
        for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
                unmapped += drm_gem_lru_scan(lrus[idx],
                                             vmap_shrink_limit - unmapped,
+                                            &remaining,
                                             vmap_shrink);
        }
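
Worked effect of the new return condition (hypothetical numbers; SHRINK_STOP tells the shrinker core to stop calling this scan for the current pass):

        /*
         * freed = 32, remaining = 0  -> SHRINK_STOP: everything reachable
         *     was handled; the old code returned 32, inviting a futile recall.
         * freed = 32, remaining = 16 -> return 32: contended objects may
         *     still free pages when the core calls back and trylock succeeds.
         */

In the vmap-notifier hunk just above, remaining is passed only to satisfy the new signature; its value is not consulted afterwards.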
 
drivers/gpu/drm/panfrost/panfrost_mmu.c
index 4e83a18..666a5e5 100644
@@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
        if (pm_runtime_active(pfdev->dev))
                mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
 
-       pm_runtime_put_sync_autosuspend(pfdev->dev);
+       pm_runtime_put_autosuspend(pfdev->dev);
 }
 
 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
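
Background for the pm_runtime change (a sketch; upstream panfrost_mmu_flush_range() pairs this put with a pm_runtime_get_noresume() above the hunk): the _sync_ variant may run the runtime-suspend callback synchronously in the caller's context, while plain pm_runtime_put_autosuspend() only drops the usage count and (re)arms the autosuspend timer:

        pm_runtime_get_noresume(pfdev->dev);    /* take a ref without waking */

        /* flush the PTs only if the GPU is already awake */
        if (pm_runtime_active(pfdev->dev))
                mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

        /* async: never suspends the device from this context */
        pm_runtime_put_autosuspend(pfdev->dev);
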
drivers/gpu/drm/sun4i/sun4i_drv.c
index cc94efb..d6c7417 100644
@@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev)
        /* drm_vblank_init calls kcalloc, which can fail */
        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (ret)
-               goto cleanup_mode_config;
+               goto unbind_all;
 
        /* Remove early framebuffers (ie. simplefb) */
        ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
        if (ret)
-               goto cleanup_mode_config;
+               goto unbind_all;
 
        sun4i_framebuffer_init(drm);
 
@@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev)
 
 finish_poll:
        drm_kms_helper_poll_fini(drm);
+unbind_all:
+       component_unbind_all(dev, NULL);
 cleanup_mode_config:
        drm_mode_config_cleanup(drm);
        of_reserved_mem_device_release(dev);
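
The point of the new label (my reading of the hunks): both failing calls run after component_bind_all(), yet they bailed out to cleanup_mode_config, skipping component_unbind_all() and leaking the bound components. The labels fall through, so the unwind now mirrors the setup order:

        /* error unwind, abridged:
         * unbind_all:
         *         component_unbind_all(dev, NULL);  <- new, undoes component_bind_all()
         * cleanup_mode_config:                      <- fallthrough
         *         drm_mode_config_cleanup(drm);
         *         of_reserved_mem_device_release(dev);
         */
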
drivers/gpu/drm/ttm/ttm_device.c
index c7a1862..ae2f19d 100644
@@ -158,7 +158,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
                        struct ttm_buffer_object *bo = res->bo;
                        uint32_t num_pages;
 
-                       if (!bo)
+                       if (!bo || bo->resource != res)
                                continue;
 
                        num_pages = PFN_UP(bo->base.size);
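
Gloss on the added guard (my reading): a resource can linger on the swap LRU after its buffer object has been handed a different one, e.g. while a move is in flight, so res->bo alone does not prove the pairing is current. The check skips such stale entries instead of swapping out the wrong backing store:

        struct ttm_buffer_object *bo = res->bo;

        /* act only when the BO still owns the resource found on the LRU */
        if (!bo || bo->resource != res)
                continue;
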
drivers/gpu/drm/virtio/virtgpu_vq.c
index a04a9b2..1778a20 100644
@@ -604,7 +604,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
        if (virtio_gpu_is_shmem(bo) && use_dma_api)
-               dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            bo->base.sgt, DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1026,7 +1026,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
        if (virtio_gpu_is_shmem(bo) && use_dma_api)
-               dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            bo->base.sgt, DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
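
Why the parent device (my reading): &vdev->dev is the logical virtio device and carries no DMA configuration; the transport parent (for virtio-pci, the PCI function) is the device the scatterlist was mapped for, and cache maintenance must name that same struct device:

        /* typical virtio-pci hierarchy:
         *   pci_dev->dev              <- performs DMA, owns the DMA ops/mask
         *     `-- virtio_device->dev  <- logical device, no DMA setup
         */
        dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                    bo->base.sgt, DMA_TO_DEVICE);
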
drivers/video/fbdev/chipsfb.c
index cc37ec3..7799d52 100644
@@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
        if (rc)
                return rc;
 
-       if (pci_enable_device(dp) < 0) {
+       rc = pci_enable_device(dp);
+       if (rc < 0) {
                dev_err(&dp->dev, "Cannot enable PCI device\n");
                goto err_out;
        }
 
-       if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+       if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
+               rc = -ENODEV;
                goto err_disable;
+       }
        addr = pci_resource_start(dp, 0);
-       if (addr == 0)
+       if (addr == 0) {
+               rc = -ENODEV;
                goto err_disable;
+       }
 
        p = framebuffer_alloc(0, &dp->dev);
        if (p == NULL) {
@@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
 
        init_chips(p, addr);
 
-       if (register_framebuffer(p) < 0) {
+       rc = register_framebuffer(p);
+       if (rc < 0) {
                dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
                goto err_unmap;
        }
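
The bug pattern being fixed (my gloss): each call's result was tested and then thrown away, so the error paths returned whatever rc still held from an earlier call, typically 0, making a failed probe look successful. Before/after in miniature:

        /* before: result tested, then discarded */
        if (pci_enable_device(dp) < 0)
                goto err_out;           /* returns stale rc */

        /* after: errno captured, then propagated */
        rc = pci_enable_device(dp);
        if (rc < 0)
                goto err_out;           /* returns the real error */
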
drivers/video/fbdev/core/fb_defio.c
index aa5f059..274f5d0 100644
@@ -305,17 +305,18 @@ void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
 {
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+
        file->f_mapping->a_ops = &fb_deferred_io_aops;
+       fbdefio->open_count++;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 
-void fb_deferred_io_release(struct fb_info *info)
+static void fb_deferred_io_lastclose(struct fb_info *info)
 {
-       struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;
 
-       BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);
 
        /* clear out the mapping that we setup */
@@ -324,13 +325,21 @@ void fb_deferred_io_release(struct fb_info *info)
                page->mapping = NULL;
        }
 }
+
+void fb_deferred_io_release(struct fb_info *info)
+{
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+
+       if (!--fbdefio->open_count)
+               fb_deferred_io_lastclose(info);
+}
 EXPORT_SYMBOL_GPL(fb_deferred_io_release);
 
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
 
-       fb_deferred_io_release(info);
+       fb_deferred_io_lastclose(info);
 
        kvfree(info->pagerefs);
        mutex_destroy(&fbdefio->lock);
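
Lifecycle implied by the refactor (a trace, assuming two userspace opens of the same fbdev node):

        /* open(fd1)  -> fb_deferred_io_open():    open_count 0 -> 1
         * open(fd2)  -> fb_deferred_io_open():    open_count 1 -> 2
         * close(fd1) -> fb_deferred_io_release(): open_count 2 -> 1, no-op
         * close(fd2) -> fb_deferred_io_release(): open_count 1 -> 0
         *            -> fb_deferred_io_lastclose(): cancel deferred work,
         *               clear page->mapping on every pageref page
         * driver teardown -> fb_deferred_io_cleanup() calls lastclose
         *               directly, independent of file references
         */
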
include/drm/drm_gem.h
index 772a4ad..f1f00fc 100644
@@ -476,7 +476,9 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
 void drm_gem_lru_remove(struct drm_gem_object *obj);
 void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
+                              unsigned int nr_to_scan,
+                              unsigned long *remaining,
                               bool (*shrink)(struct drm_gem_object *obj));
 
 #endif /* __DRM_GEM_H__ */
include/linux/fb.h
index d8d2051..02d09cb 100644
@@ -212,6 +212,7 @@ struct fb_deferred_io {
        /* delay between mkwrite and deferred handler */
        unsigned long delay;
        bool sort_pagereflist; /* sort pagelist by offset */
+       int open_count; /* number of opened files; protected by fb_info lock */
        struct mutex lock; /* mutex that protects the pageref list */
        struct list_head pagereflist; /* list of pagerefs for touched pages */
        /* callback */