Merge tag 'drm-intel-fixes-2023-07-13' of git://anongit.freedesktop.org/drm/drm-intel...
author Dave Airlie <airlied@redhat.com>
Fri, 14 Jul 2023 01:13:14 +0000 (11:13 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 14 Jul 2023 01:13:15 +0000 (11:13 +1000)
- Don't preserve dpll_hw_state for slave crtc in Bigjoiner (Stanislav Lisovskiy)
- Consider OA buffer boundary when zeroing out reports [perf] (Umesh Nerlige Ramappa)
- Remove dead code from gen8_pte_encode (Tvrtko Ursulin)
- Fix one wrong caching mode enum usage (Tvrtko Ursulin)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZK+nHLCltaxoxVw/@tursulin-desk
32 files changed:
drivers/accel/ivpu/ivpu_drv.h
drivers/accel/ivpu/ivpu_hw_mtl.c
drivers/dma-buf/dma-fence-unwrap.c
drivers/dma-buf/dma-fence.c
drivers/gpu/drm/armada/armada_fbdev.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/drm_fbdev_dma.c
drivers/gpu/drm/drm_fbdev_generic.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/gma500/fbdev.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_chan.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
drivers/gpu/drm/omapdrm/omap_fbdev.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/radeon/radeon_fbdev.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_fence.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/tegra/fbdev.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_resource.c
include/drm/gpu_scheduler.h
include/linux/dma-fence.h

diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index d3013fb..399dc5d 100644
@@ -75,6 +75,7 @@ struct ivpu_wa_table {
        bool punit_disabled;
        bool clear_runtime_mem;
        bool d3hot_after_power_off;
+       bool interrupt_clear_with_0;
 };
 
 struct ivpu_hw_info;
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c
index fef3542..2a5dd3a 100644
@@ -101,6 +101,9 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
        vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
        vdev->wa.clear_runtime_mem = false;
        vdev->wa.d3hot_after_power_off = true;
+
+       if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
+               vdev->wa.interrupt_clear_with_0 = true;
 }
 
 static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
@@ -885,7 +888,7 @@ static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
        REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
        REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
        REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
-       REGB_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+       REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
 }
 
 static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
@@ -973,12 +976,15 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
                schedule_recovery = true;
        }
 
-       /*
-        * Clear local interrupt status by writing 0 to all bits.
-        * This must be done after interrupts are cleared at the source.
-        * Writing 1 triggers an interrupt, so we can't perform read update write.
-        */
-       REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+       /* This must be done after interrupts are cleared at the source. */
+       if (IVPU_WA(interrupt_clear_with_0))
+               /*
+                * Writing 1 triggers an interrupt, so we can't perform read update write.
+                * Clear local interrupt status by writing 0 to all bits.
+                */
+               REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+       else
+               REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);
 
        /* Re-enable global interrupt */
        REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
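A hedged aside on the pattern above: on affected MTL steppings (revision < 4, per the wa_init hunk) writing a 1 to MTL_BUTTRESS_INTERRUPT_STAT re-raises the interrupt, so the register can only be cleared by writing 0 to every bit; fixed steppings get the conventional write-1-to-clear of exactly the bits that were handled. A minimal standalone C model of the two clear strategies (illustrative names, not driver code):

	#include <stdint.h>

	/* 1 on steppings where writing any 1 to the status register
	 * re-triggers the interrupt; mirrors the interrupt_clear_with_0
	 * workaround bit added above.
	 */
	static int wa_interrupt_clear_with_0;

	static void clear_irq_status(volatile uint32_t *stat, uint32_t handled)
	{
		if (wa_interrupt_clear_with_0)
			*stat = 0x0;		/* cannot write-1-to-clear; zero everything */
		else
			*stat = handled;	/* normal write-1-to-clear of handled bits */
	}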
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 7002bca..c625bb2 100644
@@ -66,18 +66,36 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 {
        struct dma_fence_array *result;
        struct dma_fence *tmp, **array;
+       ktime_t timestamp;
        unsigned int i;
        size_t count;
 
        count = 0;
+       timestamp = ns_to_ktime(0);
        for (i = 0; i < num_fences; ++i) {
-               dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
-                       if (!dma_fence_is_signaled(tmp))
+               dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+                       if (!dma_fence_is_signaled(tmp)) {
                                ++count;
+                       } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+                                           &tmp->flags)) {
+                               if (ktime_after(tmp->timestamp, timestamp))
+                                       timestamp = tmp->timestamp;
+                       } else {
+                               /*
+                                * Use the current time if the fence is
+                                * currently signaling.
+                                */
+                               timestamp = ktime_get();
+                       }
+               }
        }
 
+       /*
+        * If we couldn't find a pending fence just return a private signaled
+        * fence with the timestamp of the last signaled one.
+        */
        if (count == 0)
-               return dma_fence_get_stub();
+               return dma_fence_allocate_private_stub(timestamp);
 
        array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
        if (!array)
@@ -138,7 +156,7 @@ restart:
        } while (tmp);
 
        if (count == 0) {
-               tmp = dma_fence_get_stub();
+               tmp = dma_fence_allocate_private_stub(ktime_get());
                goto return_tmp;
        }
 
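The practical effect: merging fences that have all signaled no longer returns the shared global stub, whose timestamp says nothing about when the work finished. A hedged caller-side sketch (merge_example() and the fence arguments are illustrative; dma_fence_unwrap_merge() is the existing convenience macro wrapping __dma_fence_unwrap_merge()):

	#include <linux/dma-fence.h>
	#include <linux/dma-fence-unwrap.h>

	/* Merge two held fence references. If both are already signaled,
	 * the result is now a private signaled stub carrying the later of
	 * the two signaling timestamps, so timestamp-based consumers see
	 * a sensible value.
	 */
	static int merge_example(struct dma_fence *a, struct dma_fence *b,
				 struct dma_fence **out)
	{
		struct dma_fence *merged = dma_fence_unwrap_merge(a, b);

		if (!merged)
			return -ENOMEM;	/* allocation failed */
		*out = merged;
		return 0;
	}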
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index f177c56..8aa8f8c 100644
@@ -150,16 +150,17 @@ EXPORT_SYMBOL(dma_fence_get_stub);
 
 /**
  * dma_fence_allocate_private_stub - return a private, signaled fence
+ * @timestamp: timestamp when the fence was signaled
  *
  * Return a newly allocated and signaled stub fence.
  */
-struct dma_fence *dma_fence_allocate_private_stub(void)
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
 {
        struct dma_fence *fence;
 
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (fence == NULL)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        dma_fence_init(fence,
                       &dma_fence_stub_ops,
@@ -169,7 +170,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
        set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                &fence->flags);
 
-       dma_fence_signal(fence);
+       dma_fence_signal_timestamp(fence, timestamp);
 
        return fence;
 }
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 3943e89..e40a95e 100644
@@ -209,10 +209,6 @@ void armada_fbdev_setup(struct drm_device *dev)
                goto err_drm_client_init;
        }
 
-       ret = armada_fbdev_client_hotplug(&fbh->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fbh->client);
 
        return;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 9d6dcaf..7b66f36 100644
@@ -1426,9 +1426,9 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
        /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
        if (dw_hdmi_support_scdc(hdmi, display)) {
                if (mtmdsclock > HDMI14_MAX_TMDSCLK)
-                       drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
+                       drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1);
                else
-                       drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
+                       drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0);
        }
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
@@ -2116,7 +2116,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                                min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
 
                        /* Enabled Scrambling in the Sink */
-                       drm_scdc_set_scrambling(&hdmi->connector, 1);
+                       drm_scdc_set_scrambling(hdmi->curr_conn, 1);
 
                        /*
                         * To activate the scrambler feature, you must ensure
@@ -2132,7 +2132,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                        hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
                        hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
                                    HDMI_MC_SWRSTZ);
-                       drm_scdc_set_scrambling(&hdmi->connector, 0);
+                       drm_scdc_set_scrambling(hdmi->curr_conn, 0);
                }
        }
 
@@ -3553,6 +3553,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
        hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
                         | DRM_BRIDGE_OP_HPD;
        hdmi->bridge.interlace_allowed = true;
+       hdmi->bridge.ddc = hdmi->ddc;
 #ifdef CONFIG_OF
        hdmi->bridge.of_node = pdev->dev.of_node;
 #endif
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index c499a14..f448b90 100644
  * @pwm_refclk_freq: Cache for the reference clock input to the PWM.
  */
 struct ti_sn65dsi86 {
-       struct auxiliary_device         bridge_aux;
-       struct auxiliary_device         gpio_aux;
-       struct auxiliary_device         aux_aux;
-       struct auxiliary_device         pwm_aux;
+       struct auxiliary_device         *bridge_aux;
+       struct auxiliary_device         *gpio_aux;
+       struct auxiliary_device         *aux_aux;
+       struct auxiliary_device         *pwm_aux;
 
        struct device                   *dev;
        struct regmap                   *regmap;
@@ -468,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
        auxiliary_device_delete(data);
 }
 
-/*
- * AUX bus docs say that a non-NULL release is mandatory, but it makes no
- * sense for the model used here where all of the aux devices are allocated
- * in the single shared structure. We'll use this noop as a workaround.
- */
-static void ti_sn65dsi86_noop(struct device *dev) {}
+static void ti_sn65dsi86_aux_device_release(struct device *dev)
+{
+       struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
+
+       kfree(aux);
+}
 
 static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
-                                      struct auxiliary_device *aux,
+                                      struct auxiliary_device **aux_out,
                                       const char *name)
 {
        struct device *dev = pdata->dev;
+       struct auxiliary_device *aux;
        int ret;
 
+       aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+       if (!aux)
+               return -ENOMEM;
+
        aux->name = name;
        aux->dev.parent = dev;
-       aux->dev.release = ti_sn65dsi86_noop;
+       aux->dev.release = ti_sn65dsi86_aux_device_release;
        device_set_of_node_from_dev(&aux->dev, dev);
        ret = auxiliary_device_init(aux);
-       if (ret)
+       if (ret) {
+               kfree(aux);
                return ret;
+       }
        ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
        if (ret)
                return ret;
@@ -497,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
        if (ret)
                return ret;
        ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
+       if (!ret)
+               *aux_out = aux;
 
        return ret;
 }
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index f6292ba..037e36f 100644
@@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
  * drm_client_register() it is no longer permissible to call drm_client_release()
  * directly (outside the unregister callback), instead cleanup will happen
  * automatically on driver unload.
+ *
+ * Registering a client generates a hotplug event that allows the client
+ * to set up its display from pre-existing outputs. The client must have
+ * initialized its state to be able to handle the hotplug event successfully.
  */
 void drm_client_register(struct drm_client_dev *client)
 {
        struct drm_device *dev = client->dev;
+       int ret;
 
        mutex_lock(&dev->clientlist_mutex);
        list_add(&client->list, &dev->clientlist);
+
+       if (client->funcs && client->funcs->hotplug) {
+               /*
+                * Perform an initial hotplug event to pick up the
+                * display configuration for the client. This step
+                * has to be performed *after* registering the client
+                * in the list of clients, or a concurrent hotplug
+                * event might be lost, leaving the display off.
+                *
+                * Hold the clientlist_mutex as for a regular hotplug
+                * event.
+                */
+               ret = client->funcs->hotplug(client);
+               if (ret)
+                       drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+       }
        mutex_unlock(&dev->clientlist_mutex);
 }
 EXPORT_SYMBOL(drm_client_register);
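This centralization is what makes the driver-side hotplug calls removed elsewhere in this merge (armada, fbdev-dma, fbdev-generic, exynos, gma500, msm, omapdrm, radeon, tegra) redundant. A hedged sketch of the resulting setup shape, with an illustrative function name:

	#include <drm/drm_client.h>
	#include <drm/drm_fb_helper.h>
	#include <drm/drm_print.h>

	/* Post-change fbdev setup: initialize the client, then register it.
	 * drm_client_register() now performs the initial hotplug itself,
	 * under clientlist_mutex, closing the race with concurrent hotplug
	 * events.
	 */
	static void example_fbdev_setup(struct drm_device *dev,
					struct drm_fb_helper *fb_helper,
					const struct drm_client_funcs *funcs)
	{
		int ret = drm_client_init(dev, &fb_helper->client, "fbdev", funcs);

		if (ret) {
			drm_err(dev, "Failed to register client: %d\n", ret);
			return;
		}

		drm_client_register(&fb_helper->client);	/* fires ->hotplug() once */
	}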
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index d86773f..f353daf 100644
@@ -217,7 +217,7 @@ static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
  * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
  * @dev: DRM device
  * @preferred_bpp: Preferred bits per pixel for the device.
- *                 @dev->mode_config.preferred_depth is used if this is zero.
+ *                 32 is used if this is zero.
  *
  * This function sets up fbdev emulation for GEM DMA drivers that support
  * dumb buffers with a virtual address and that can be mmap'ed.
@@ -252,10 +252,6 @@ void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
                goto err_drm_client_init;
        }
 
-       ret = drm_fbdev_dma_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index 98ae703..b9343fb 100644
@@ -339,10 +339,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
                goto err_drm_client_init;
        }
 
-       ret = drm_fbdev_generic_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0c2be83..e592c5d 100644
@@ -353,10 +353,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
  */
 static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 {
-       struct dma_fence *fence = dma_fence_allocate_private_stub();
+       struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
 
-       if (IS_ERR(fence))
-               return PTR_ERR(fence);
+       if (!fence)
+               return -ENOMEM;
 
        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index fdf6558..226310c 100644
@@ -215,10 +215,6 @@ void exynos_drm_fbdev_setup(struct drm_device *dev)
        if (ret)
                goto err_drm_client_init;
 
-       ret = exynos_drm_fbdev_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 955cbe9..0544265 100644
@@ -328,10 +328,6 @@ void psb_fbdev_setup(struct drm_psb_private *dev_priv)
                goto err_drm_fb_helper_unprepare;
        }
 
-       ret = psb_fbdev_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index b933a85..bf1e17d 100644
@@ -246,10 +246,6 @@ void msm_fbdev_setup(struct drm_device *dev)
                goto err_drm_fb_helper_unprepare;
        }
 
-       ret = msm_fbdev_client_hotplug(&helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&helper->client);
 
        return;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 42e1665..38d3fad 100644
@@ -910,15 +910,19 @@ nv50_msto_prepare(struct drm_atomic_state *state,
        struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
        struct nv50_mstc *mstc = msto->mstc;
        struct nv50_mstm *mstm = mstc->mstm;
-       struct drm_dp_mst_atomic_payload *payload;
+       struct drm_dp_mst_topology_state *old_mst_state;
+       struct drm_dp_mst_atomic_payload *payload, *old_payload;
 
        NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
 
+       old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
+
        payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
+       old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
 
        // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
        if (msto->disabled) {
-               drm_dp_remove_payload(mgr, mst_state, payload, payload);
+               drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
 
                nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
        } else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e648ecd..3dfbc37 100644
@@ -90,6 +90,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                if (cli)
                        nouveau_svmm_part(chan->vmm->svmm, chan->inst);
 
+               nvif_object_dtor(&chan->blit);
                nvif_object_dtor(&chan->nvsw);
                nvif_object_dtor(&chan->gart);
                nvif_object_dtor(&chan->vram);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index e06a8ff..bad7466 100644
@@ -53,6 +53,7 @@ struct nouveau_channel {
        u32 user_put;
 
        struct nvif_object user;
+       struct nvif_object blit;
 
        struct nvif_event kill;
        atomic_t killed;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7aac938..40fb9a8 100644
@@ -375,15 +375,29 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
                ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
                                       NVDRM_NVSW, nouveau_abi16_swclass(drm),
                                       NULL, 0, &drm->channel->nvsw);
+
+               if (ret == 0 && device->info.chipset >= 0x11) {
+                       ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
+                                              0x005f, 0x009f,
+                                              NULL, 0, &drm->channel->blit);
+               }
+
                if (ret == 0) {
                        struct nvif_push *push = drm->channel->chan.push;
-                       ret = PUSH_WAIT(push, 2);
-                       if (ret == 0)
+                       ret = PUSH_WAIT(push, 8);
+                       if (ret == 0) {
+                               if (device->info.chipset >= 0x11) {
+                                       PUSH_NVSQ(push, NV05F, 0x0000, drm->channel->blit.handle);
+                                       PUSH_NVSQ(push, NV09F, 0x0120, 0,
+                                                              0x0124, 1,
+                                                              0x0128, 2);
+                               }
                                PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
+                       }
                }
 
                if (ret) {
-                       NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
+                       NV_ERROR(drm, "failed to allocate sw or blit class, %d\n", ret);
                        nouveau_accel_gr_fini(drm);
                        return;
                }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index a4853c4..67ef889 100644
@@ -295,6 +295,7 @@ g94_sor = {
        .clock = nv50_sor_clock,
        .war_2 = g94_sor_war_2,
        .war_3 = g94_sor_war_3,
+       .hdmi = &g84_sor_hdmi,
        .dp = &g94_sor_dp,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index a2c7c6f..506ffbe 100644
@@ -125,7 +125,7 @@ gt215_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 siz
        pack_hdmi_infoframe(&avi, data, size);
 
        nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
-       if (size)
+       if (!size)
                return;
 
        nvkm_wr32(device, 0x61c528 + soff, avi.header);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index 795f3a6..9b8ca4e 100644
@@ -224,7 +224,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
        u64 falcons;
        int ret, i;
 
-       if (list_empty(&acr->hsfw)) {
+       if (list_empty(&acr->hsfw) || !acr->func || !acr->func->wpr_layout) {
                nvkm_debug(subdev, "No HSFW(s)\n");
                nvkm_acr_cleanup(acr);
                return 0;
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b7ccce0..fe6639c 100644
@@ -318,10 +318,6 @@ void omap_fbdev_setup(struct drm_device *dev)
 
        INIT_WORK(&fbdev->work, pan_worker);
 
-       ret = omap_fbdev_client_hotplug(&helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&helper->client);
 
        return;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a247a0e..aaba36b 100644
@@ -2178,6 +2178,7 @@ static const struct panel_desc innolux_at043tn24 = {
                .height = 54,
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
 };
 
@@ -3202,6 +3203,7 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
        .vsync_start = 480 + 49,
        .vsync_end = 480 + 49 + 2,
        .vtotal = 480 + 49 + 2 + 22,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
 };
 
 static const struct panel_desc powertip_ph800480t013_idf02  = {
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index ab9c1ab..f941e2e 100644
@@ -383,10 +383,6 @@ void radeon_fbdev_setup(struct radeon_device *rdev)
                goto err_drm_client_init;
        }
 
-       ret = radeon_fbdev_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(rdev->ddev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index b2bbc8a..a42763e 100644
@@ -176,16 +176,32 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 {
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
-       int r;
+       unsigned long index;
 
        dma_fence_put(f);
 
        /* Wait for all dependencies to avoid data corruptions */
-       while (!xa_empty(&job->dependencies)) {
-               f = xa_erase(&job->dependencies, job->last_dependency++);
-               r = dma_fence_add_callback(f, &job->finish_cb,
-                                          drm_sched_entity_kill_jobs_cb);
-               if (!r)
+       xa_for_each(&job->dependencies, index, f) {
+               struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+               if (s_fence && f == &s_fence->scheduled) {
+                       /* The dependencies array had a reference on the scheduled
+                        * fence, and the finished fence refcount might have
+                        * dropped to zero. Use dma_fence_get_rcu() so we get
+                        * a NULL fence in that case.
+                        */
+                       f = dma_fence_get_rcu(&s_fence->finished);
+
+                       /* Now that we have a reference on the finished fence,
+                        * we can release the reference the dependencies array
+                        * had on the scheduled fence.
+                        */
+                       dma_fence_put(&s_fence->scheduled);
+               }
+
+               xa_erase(&job->dependencies, index);
+               if (f && !dma_fence_add_callback(f, &job->finish_cb,
+                                                drm_sched_entity_kill_jobs_cb))
                        return;
 
                dma_fence_put(f);
@@ -415,8 +431,17 @@ static struct dma_fence *
 drm_sched_job_dependency(struct drm_sched_job *job,
                         struct drm_sched_entity *entity)
 {
-       if (!xa_empty(&job->dependencies))
-               return xa_erase(&job->dependencies, job->last_dependency++);
+       struct dma_fence *f;
+
+       /* We keep the fence around, so we can iterate over all dependencies
+        * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
+        * before killing the job.
+        */
+       f = xa_load(&job->dependencies, job->last_dependency);
+       if (f) {
+               job->last_dependency++;
+               return dma_fence_get(f);
+       }
 
        if (job->sched->ops->prepare_job)
                return job->sched->ops->prepare_job(job, entity);
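Note the consumption-model change in drm_sched_job_dependency(): dependencies are now read with xa_load() and a cursor rather than destructively erased, so the kill path above can still walk the full set. A hedged distillation of that non-destructive cursor pattern (the helper name is illustrative):

	#include <linux/dma-fence.h>
	#include <linux/xarray.h>

	/* Peek the next dependency without removing it from the xarray;
	 * the caller gets its own reference, and the entry stays behind
	 * for the cancellation path to iterate later.
	 */
	static struct dma_fence *peek_next_dep(struct xarray *deps,
					       unsigned long *cursor)
	{
		struct dma_fence *f = xa_load(deps, *cursor);

		if (!f)
			return NULL;
		(*cursor)++;
		return dma_fence_get(f);
	}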
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index ef12047..06cedfe 100644
@@ -48,8 +48,32 @@ static void __exit drm_sched_fence_slab_fini(void)
        kmem_cache_destroy(sched_fence_slab);
 }
 
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
+static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
+                                      struct dma_fence *fence)
 {
+       /*
+        * smp_store_release() to ensure another thread racing us
+        * in drm_sched_fence_set_deadline_finished() sees the
+        * fence's parent set before test_bit()
+        */
+       smp_store_release(&s_fence->parent, dma_fence_get(fence));
+       if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
+                    &s_fence->finished.flags))
+               dma_fence_set_deadline(fence, s_fence->deadline);
+}
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+                              struct dma_fence *parent)
+{
+       /* Set the parent before signaling the scheduled fence, such that,
+        * any waiter expecting the parent to be filled after the job has
+        * been scheduled (which is the case for drivers delegating waits
+        * to some firmware) doesn't have to busy wait for parent to show
+        * up.
+        */
+       if (!IS_ERR_OR_NULL(parent))
+               drm_sched_fence_set_parent(fence, parent);
+
        dma_fence_signal(&fence->scheduled);
 }
 
@@ -181,20 +205,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
 }
 EXPORT_SYMBOL(to_drm_sched_fence);
 
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
-                               struct dma_fence *fence)
-{
-       /*
-        * smp_store_release() to ensure another thread racing us
-        * in drm_sched_fence_set_deadline_finished() sees the
-        * fence's parent set before test_bit()
-        */
-       smp_store_release(&s_fence->parent, dma_fence_get(fence));
-       if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
-                    &s_fence->finished.flags))
-               dma_fence_set_deadline(fence, s_fence->deadline);
-}
-
 struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
                                              void *owner)
 {
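The smp_store_release() above pairs with an acquire load on the deadline path; a hedged, simplified sketch of that reader side, modeled on drm_sched_fence_set_deadline_finished() (locking and the earlier-deadline check are omitted here):

	static void deadline_reader_side(struct drm_sched_fence *fence,
					 ktime_t deadline)
	{
		struct dma_fence *parent;

		fence->deadline = deadline;
		set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
			&fence->finished.flags);

		/* Pairs with smp_store_release() in
		 * drm_sched_fence_set_parent(): of two racing threads, at
		 * least one observes both the bit and the parent, so the
		 * deadline is always propagated.
		 */
		parent = smp_load_acquire(&fence->parent);
		if (parent)
			dma_fence_set_deadline(parent, deadline);
	}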
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 7b2bfc1..506371c 100644
@@ -1043,10 +1043,9 @@ static int drm_sched_main(void *param)
                trace_drm_run_job(sched_job, entity);
                fence = sched->ops->run_job(sched_job);
                complete_all(&entity->entity_idle);
-               drm_sched_fence_scheduled(s_fence);
+               drm_sched_fence_scheduled(s_fence, fence);
 
                if (!IS_ERR_OR_NULL(fence)) {
-                       drm_sched_fence_set_parent(s_fence, fence);
                        /* Drop for original kref_init of the fence */
                        dma_fence_put(fence);
 
diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c
index e74d9be..d042234 100644
@@ -225,10 +225,6 @@ void tegra_fbdev_setup(struct drm_device *dev)
        if (ret)
                goto err_drm_client_init;
 
-       ret = tegra_fbdev_client_hotplug(&helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&helper->client);
 
        return;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bd5dae4..7139a52 100644
@@ -458,18 +458,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
                goto out;
        }
 
-bounce:
-       ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
-       if (ret == -EMULTIHOP) {
+       do {
+               ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+               if (ret != -EMULTIHOP)
+                       break;
+
                ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
-               if (ret) {
-                       if (ret != -ERESTARTSYS && ret != -EINTR)
-                               pr_err("Buffer eviction failed\n");
-                       ttm_resource_free(bo, &evict_mem);
-                       goto out;
-               }
-               /* try and move to final place now. */
-               goto bounce;
+       } while (!ret);
+
+       if (ret) {
+               ttm_resource_free(bo, &evict_mem);
+               if (ret != -ERESTARTSYS && ret != -EINTR)
+                       pr_err("Buffer eviction failed\n");
        }
 out:
        return ret;
@@ -517,6 +517,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 {
        bool ret = false;
 
+       if (bo->pin_count) {
+               *locked = false;
+               *busy = false;
+               return false;
+       }
+
        if (bo->base.resv == ctx->resv) {
                dma_resv_assert_held(bo->base.resv);
                if (ctx->allow_res_evict)
@@ -1167,6 +1173,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
                if (unlikely(ret != 0)) {
                        WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
+                       ttm_resource_free(bo, &evict_mem);
                        goto out;
                }
        }
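For context on the loop rewrite in the first hunk of this file: the move contract is that ttm_bo_handle_move_mem() returns -EMULTIHOP and fills in 'hop' when the final placement cannot be reached directly, and the caller bounces through the intermediate placement and retries. A hedged distillation of that retry shape (move() and bounce() stand in for the real TTM helpers):

	do {
		ret = move(bo, mem, &hop);	/* -EMULTIHOP: 'hop' holds a waypoint */
		if (ret != -EMULTIHOP)
			break;			/* success, or a hard failure */
		ret = bounce(bo, &mem, &hop);	/* relocate via the intermediate hop */
	} while (!ret);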
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7333f7a..46ff9c7 100644
@@ -86,6 +86,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
                                       struct ttm_resource *res)
 {
        if (pos->last != res) {
+               if (pos->first == res)
+                       pos->first = list_next_entry(res, lru);
                list_move(&res->lru, &pos->last->lru);
                pos->last = res;
        }
@@ -111,7 +113,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
 {
        struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
 
-       if (unlikely(pos->first == res && pos->last == res)) {
+       if (unlikely(WARN_ON(!pos->first || !pos->last) ||
+                    (pos->first == res && pos->last == res))) {
                pos->first = NULL;
                pos->last = NULL;
        } else if (pos->first == res) {
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index e95b483..f9544d9 100644
@@ -583,15 +583,14 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 int drm_sched_entity_error(struct drm_sched_entity *entity);
 
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
-                               struct dma_fence *fence);
 struct drm_sched_fence *drm_sched_fence_alloc(
        struct drm_sched_entity *s_entity, void *owner);
 void drm_sched_fence_init(struct drm_sched_fence *fence,
                          struct drm_sched_entity *entity);
 void drm_sched_fence_free(struct drm_sched_fence *fence);
 
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+                              struct dma_fence *parent);
 void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
 
 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index d54b595..0d678e9 100644
@@ -606,7 +606,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
 void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
 
 struct dma_fence *dma_fence_get_stub(void);
-struct dma_fence *dma_fence_allocate_private_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
 u64 dma_fence_context_alloc(unsigned num);
 
 extern const struct dma_fence_ops dma_fence_array_ops;