hvs->eof_irq[channel].enabled = false;
}
+/* Forward declaration (defined later in this file). Per the call site
+ * below, it is invoked while hvs->mm_lock is held — the _locked suffix
+ * marks that the caller owns the lock.
+ */
+static void vc4_hvs_free_dlist_entry_locked(struct vc4_hvs *hvs,
+ struct vc4_hvs_dlist_allocation *alloc);
+
static struct vc4_hvs_dlist_allocation *
vc4_hvs_alloc_dlist_entry(struct vc4_hvs *hvs,
unsigned int channel,
struct vc4_dev *vc4 = hvs->vc4;
struct drm_device *dev = &vc4->base;
struct vc4_hvs_dlist_allocation *alloc;
+ struct vc4_hvs_dlist_allocation *cur, *next;
unsigned long flags;
int ret;
dlist_count);
spin_unlock_irqrestore(&hvs->mm_lock, flags);
if (ret) {
- drm_err(dev, "Failed to allocate DLIST entry. Requested size=%zu. ret=%d\n",
- dlist_count, ret);
- return ERR_PTR(ret);
+ drm_err(dev, "Failed to allocate DLIST entry. Requested size=%zu. ret=%d. DISPCTRL is %08x\n",
+ dlist_count, ret, HVS_READ(SCALER_DISPCTRL));
+
+ /* This should never happen as stale entries should get released
+ * as the frame counter interrupt triggers.
+ * However we've seen this fail for reasons currently unknown.
+ * Free all stale entries now so we should be able to complete
+ * this allocation.
+ */
+ spin_lock_irqsave(&hvs->mm_lock, flags);
+ list_for_each_entry_safe(cur, next, &hvs->stale_dlist_entries, node) {
+ vc4_hvs_free_dlist_entry_locked(hvs, cur);
+ }
+
+ ret = drm_mm_insert_node(&hvs->dlist_mm, &alloc->mm_node,
+ dlist_count);
+ spin_unlock_irqrestore(&hvs->mm_lock, flags);
+
+ if (ret)
+ return ERR_PTR(ret);
}
alloc->channel = channel;