// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 HVS module.
 *
 * The Hardware Video Scaler (HVS) is the piece of hardware that does
 * translation, scaling, colorspace conversion, and compositing of
 * pixels stored in framebuffers into a FIFO of pixels going out to
 * the Pixel Valve (CRTC). It operates at the system clock rate (the
 * system audio clock gate, specifically), which is much higher than
 * the pixel clock rate.
 *
 * There is a single global HVS, with multiple output FIFOs that can
 * be consumed by the PVs. This file just manages the resources for
 * the HVS, while the vc4_crtc.c code actually drives HVS setup for
 * each CRTC.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>

#include <soc/bcm2835/raspberrypi-firmware.h>

/* Needed for kunit_get_current_test() below. */
#include <kunit/test-bug.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct debugfs_reg32 hvs_regs[] = {
	VC4_REG32(SCALER_DISPCTRL),
	VC4_REG32(SCALER_DISPSTAT),
	VC4_REG32(SCALER_DISPID),
	VC4_REG32(SCALER_DISPECTRL),
	VC4_REG32(SCALER_DISPPROF),
	VC4_REG32(SCALER_DISPDITHER),
	VC4_REG32(SCALER_DISPEOLN),
	VC4_REG32(SCALER_DISPLIST0),
	VC4_REG32(SCALER_DISPLIST1),
	VC4_REG32(SCALER_DISPLIST2),
	VC4_REG32(SCALER_DISPLSTAT),
	VC4_REG32(SCALER_DISPLACT0),
	VC4_REG32(SCALER_DISPLACT1),
	VC4_REG32(SCALER_DISPLACT2),
	VC4_REG32(SCALER_DISPCTRL0),
	VC4_REG32(SCALER_DISPBKGND0),
	VC4_REG32(SCALER_DISPSTAT0),
	VC4_REG32(SCALER_DISPBASE0),
	VC4_REG32(SCALER_DISPCTRL1),
	VC4_REG32(SCALER_DISPBKGND1),
	VC4_REG32(SCALER_DISPSTAT1),
	VC4_REG32(SCALER_DISPBASE1),
	VC4_REG32(SCALER_DISPCTRL2),
	VC4_REG32(SCALER_DISPBKGND2),
	VC4_REG32(SCALER_DISPSTAT2),
	VC4_REG32(SCALER_DISPBASE2),
	VC4_REG32(SCALER_DISPALPHA2),
	VC4_REG32(SCALER_OLEDOFFS),
	VC4_REG32(SCALER_OLEDCOEF0),
	VC4_REG32(SCALER_OLEDCOEF1),
	VC4_REG32(SCALER_OLEDCOEF2),
};

void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
	struct drm_device *drm = &hvs->vc4->base;
	struct drm_printer p = drm_info_printer(&hvs->pdev->dev);
	int idx, i;

	if (!drm_dev_enter(drm, &idx))
		return;

	drm_print_regset32(&p, &hvs->regset);

	DRM_INFO("HVS ctx:\n");
	for (i = 0; i < 64; i += 4) {
		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
			 readl((u32 __iomem *)hvs->dlist + i + 0),
			 readl((u32 __iomem *)hvs->dlist + i + 1),
			 readl((u32 __iomem *)hvs->dlist + i + 2),
			 readl((u32 __iomem *)hvs->dlist + i + 3));
	}

	drm_dev_exit(idx);
}

static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));

	return 0;
}

static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_printer p = drm_seq_file_printer(m);
	unsigned int next_entry_start = 0;
	unsigned int i, j;
	u32 dlist_word, dispstat;

	for (i = 0; i < SCALER_CHANNELS_COUNT; i++) {
		dispstat = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(i)),
					 SCALER_DISPSTATX_MODE);
		if (dispstat == SCALER_DISPSTATX_MODE_DISABLED ||
		    dispstat == SCALER_DISPSTATX_MODE_EOF) {
			drm_printf(&p, "HVS chan %u disabled\n", i);
			continue;
		}

		drm_printf(&p, "HVS chan %u:\n", i);

		for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) {
			dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
			drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
				   dlist_word);
			if (!next_entry_start ||
			    next_entry_start == j) {
				if (dlist_word & SCALER_CTL0_END)
					break;
				next_entry_start = j +
					VC4_GET_FIELD(dlist_word,
						      SCALER_CTL0_SIZE);
			}
		}
	}

	return 0;
}

static int vc5_hvs_debugfs_gamma(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_printer p = drm_seq_file_printer(m);
	unsigned int i, chan;
	u32 dispstat, dispbkgndx;

	for (chan = 0; chan < SCALER_CHANNELS_COUNT; chan++) {
		u32 x_c, grad;
		u32 offset = SCALER5_DSPGAMMA_START +
			chan * SCALER5_DSPGAMMA_CHAN_OFFSET;

		dispstat = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
					 SCALER_DISPSTATX_MODE);
		if (dispstat == SCALER_DISPSTATX_MODE_DISABLED ||
		    dispstat == SCALER_DISPSTATX_MODE_EOF) {
			drm_printf(&p, "HVS channel %u: Channel disabled\n", chan);
			continue;
		}

		dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
		if (!(dispbkgndx & SCALER_DISPBKGND_GAMMA)) {
			drm_printf(&p, "HVS channel %u: Gamma disabled\n", chan);
			continue;
		}

		drm_printf(&p, "HVS channel %u:\n", chan);
		drm_printf(&p, "  red:\n");
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8) {
			x_c = HVS_READ(offset);
			grad = HVS_READ(offset + 4);
			drm_printf(&p, "  %08x %08x - x %u, c %u, grad %u\n",
				   x_c, grad,
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_X),
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_C),
				   grad);
		}

		drm_printf(&p, "  green:\n");
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8) {
			x_c = HVS_READ(offset);
			grad = HVS_READ(offset + 4);
			drm_printf(&p, "  %08x %08x - x %u, c %u, grad %u\n",
				   x_c, grad,
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_X),
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_C),
				   grad);
		}

		drm_printf(&p, "  blue:\n");
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8) {
			x_c = HVS_READ(offset);
			grad = HVS_READ(offset + 4);
			drm_printf(&p, "  %08x %08x - x %u, c %u, grad %u\n",
				   x_c, grad,
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_X),
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_C),
				   grad);
		}

		/* Alpha only valid on channel 2 */
		if (chan != 2)
			continue;

		drm_printf(&p, "  alpha:\n");
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8) {
			x_c = HVS_READ(offset);
			grad = HVS_READ(offset + 4);
			drm_printf(&p, "  %08x %08x - x %u, c %u, grad %u\n",
				   x_c, grad,
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_X),
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_C),
				   grad);
		}
	}

	return 0;
}

/* The filter kernel is composed of dwords each containing 3 9-bit
 * signed integers packed next to each other.
 */
#define VC4_INT_TO_COEFF(coeff) (coeff & 0x1ff)
#define VC4_PPF_FILTER_WORD(c0, c1, c2)				\
	((((c0) & 0x1ff) << 0) |				\
	 (((c1) & 0x1ff) << 9) |				\
	 (((c2) & 0x1ff) << 18))
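
/* Worked example (added comment): VC4_PPF_FILTER_WORD(0, -2, -6) packs to
 * 0x07ebfc00: c0 = 0 in bits 0-8, c1 = -2 = 0x1fe in bits 9-17, and
 * c2 = -6 = 0x1fa in bits 18-26, each lane being 9-bit two's complement.
 */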

/* The whole filter kernel is arranged as the coefficients 0-16 going
 * up, then a pad, then 17-31 going down and reversed within the
 * dwords. This means that a linear phase kernel (where it's
 * symmetrical at the boundary between 15 and 16) has the last 5
 * dwords matching the first 5, but reversed.
 */
#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8,	\
				c9, c10, c11, c12, c13, c14, c15)	\
	{VC4_PPF_FILTER_WORD(c0, c1, c2),				\
	 VC4_PPF_FILTER_WORD(c3, c4, c5),				\
	 VC4_PPF_FILTER_WORD(c6, c7, c8),				\
	 VC4_PPF_FILTER_WORD(c9, c10, c11),				\
	 VC4_PPF_FILTER_WORD(c12, c13, c14),				\
	 VC4_PPF_FILTER_WORD(c15, c15, 0)}

#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6
#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)
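
/* Added note: 6 * 2 - 1 = 11 dwords, i.e. 33 coefficient slots for the 32
 * kernel taps plus the single pad slot described above.
 */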

/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali.
 * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
 */
static const u32 mitchell_netravali_1_3_1_3_kernel[] =
	VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18,
				50, 82, 119, 155, 187, 213, 227);

static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
					struct drm_mm_node *space,
					const u32 *kernel)
{
	int ret, i;
	u32 __iomem *dst_kernel;

	/*
	 * NOTE: We don't need a call to drm_dev_enter()/drm_dev_exit()
	 * here since that function is only called from vc4_hvs_bind().
	 */

	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
	if (ret) {
		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
			  ret);
		return ret;
	}

	dst_kernel = hvs->dlist + space->start;

	for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
		if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS)
			writel(kernel[i], &dst_kernel[i]);
		else {
			/* Mirror the first half for the second half. */
			writel(kernel[VC4_KERNEL_DWORDS - i - 1],
			       &dst_kernel[i]);
		}
	}

	return 0;
}

static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
			     struct vc4_crtc *vc4_crtc)
{
	struct drm_device *drm = &hvs->vc4->base;
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	int idx;
	u32 i;

	if (!drm_dev_enter(drm, &idx))
		return;

	/* The LUT memory is laid out with each HVS channel in order,
	 * each of which takes 256 writes for R, 256 for G, then 256
	 * for B.
	 */
	HVS_WRITE(SCALER_GAMADDR,
		  SCALER_GAMADDR_AUTOINC |
		  (vc4_state->assigned_channel * 3 * crtc->gamma_size));
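
	/* For example (added note): with the usual 256-entry gamma table and
	 * assigned_channel 1, the writes below start at LUT word 768
	 * (1 * 3 * 256) and auto-increment from there.
	 */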
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);

	drm_dev_exit(idx);
}

static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
				     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct drm_color_lut *lut = crtc_state->gamma_lut->data;
	u32 length = drm_color_lut_size(crtc_state->gamma_lut);
	u32 i;

	for (i = 0; i < length; i++) {
		vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);
		vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8);
		vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8);
	}

	vc4_hvs_lut_load(hvs, vc4_crtc);
}

static void vc5_hvs_write_gamma_entry(struct vc4_hvs *hvs,
				      u32 offset,
				      struct vc5_gamma_entry *gamma)
{
	HVS_WRITE(offset, gamma->x_c_terms);
	HVS_WRITE(offset + 4, gamma->grad_term);
}

static void vc5_hvs_lut_load(struct vc4_hvs *hvs,
			     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
	unsigned int i;
	u32 offset = SCALER5_DSPGAMMA_START +
		vc4_state->assigned_channel * SCALER5_DSPGAMMA_CHAN_OFFSET;

	for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8)
		vc5_hvs_write_gamma_entry(hvs, offset, &vc4_crtc->pwl_r[i]);
	for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8)
		vc5_hvs_write_gamma_entry(hvs, offset, &vc4_crtc->pwl_g[i]);
	for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8)
		vc5_hvs_write_gamma_entry(hvs, offset, &vc4_crtc->pwl_b[i]);

	if (vc4_state->assigned_channel == 2) {
		/* Alpha only valid on channel 2 */
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8)
			vc5_hvs_write_gamma_entry(hvs, offset, &vc4_crtc->pwl_a[i]);
	}
}

static void vc5_hvs_update_gamma_lut(struct vc4_hvs *hvs,
				     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct drm_color_lut *lut = crtc->state->gamma_lut->data;
	unsigned int step, i;
	u32 start, end;

#define VC5_HVS_UPDATE_GAMMA_ENTRY_FROM_LUT(pwl, chan)			\
	start = drm_color_lut_extract(lut[i * step].chan, 12);		\
	end = drm_color_lut_extract(lut[(i + 1) * step - 1].chan, 12);	\
									\
	/* Negative gradients not permitted by the hardware, so		\
	 * flatten such points out.					\
	 */								\
	if (end < start)						\
		end = start;						\
									\
	/* Assume 12bit pipeline.					\
	 * X evenly spread over full range (12 bit).			\
	 * C as U12.4 format.						\
	 * Gradient as U4.8 format.					\
	 */								\
	vc4_crtc->pwl[i] =						\
		VC5_HVS_SET_GAMMA_ENTRY(i << 8, start << 4,		\
					((end - start) << 4) / (step - 1))
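
	/* Worked example (added, assuming a 256-entry LUT): step below is
	 * 256 / 16 = 16, so point i covers LUT entries [16i, 16i + 15];
	 * x = i << 8 spreads the 16 points over the 12-bit range, c is
	 * start << 4 in U12.4, and the U4.8 gradient is
	 * ((end - start) << 4) / 15.
	 */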

	/* HVS5 has a 16 point piecewise linear function for each colour
	 * channel (including alpha on channel 2) on each display channel.
	 *
	 * Currently we take a crude subsample of the gamma LUT, but this
	 * could be improved to implement curve fitting.
	 */
	step = crtc->gamma_size / SCALER5_DSPGAMMA_NUM_POINTS;
	for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++) {
		VC5_HVS_UPDATE_GAMMA_ENTRY_FROM_LUT(pwl_r, red);
		VC5_HVS_UPDATE_GAMMA_ENTRY_FROM_LUT(pwl_g, green);
		VC5_HVS_UPDATE_GAMMA_ENTRY_FROM_LUT(pwl_b, blue);
	}

	vc5_hvs_lut_load(hvs, vc4_crtc);
}

static void vc4_hvs_irq_enable_eof(const struct vc4_hvs *hvs,
				   unsigned int channel)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 irq_mask = vc4->is_vc5 ?
		SCALER5_DISPCTRL_DSPEIEOF(channel) :
		SCALER_DISPCTRL_DSPEIEOF(channel);

	HVS_WRITE(SCALER_DISPCTRL,
		  HVS_READ(SCALER_DISPCTRL) | irq_mask);
}

static void vc4_hvs_irq_clear_eof(const struct vc4_hvs *hvs,
				  unsigned int channel)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 irq_mask = vc4->is_vc5 ?
		SCALER5_DISPCTRL_DSPEIEOF(channel) :
		SCALER_DISPCTRL_DSPEIEOF(channel);

	HVS_WRITE(SCALER_DISPCTRL,
		  HVS_READ(SCALER_DISPCTRL) & ~irq_mask);
}

static struct vc4_hvs_dlist_allocation *
vc4_hvs_alloc_dlist_entry(struct vc4_hvs *hvs,
			  unsigned int channel,
			  size_t dlist_count)
{
	struct vc4_hvs_dlist_allocation *alloc;
	unsigned long flags;
	int ret;

	if (channel == VC4_HVS_CHANNEL_DISABLED)
		return NULL;

	alloc = kzalloc(sizeof(*alloc), GFP_KERNEL);
	if (!alloc)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&alloc->node);

	spin_lock_irqsave(&hvs->mm_lock, flags);
	ret = drm_mm_insert_node(&hvs->dlist_mm, &alloc->mm_node,
				 dlist_count);
	spin_unlock_irqrestore(&hvs->mm_lock, flags);
	if (ret) {
		kfree(alloc);
		return ERR_PTR(ret);
	}

	alloc->channel = channel;

	return alloc;
}

static void vc4_hvs_free_dlist_entry_locked(struct vc4_hvs *hvs,
					    struct vc4_hvs_dlist_allocation *alloc)
{
	lockdep_assert_held(&hvs->mm_lock);

	if (!list_empty(&alloc->node))
		list_del(&alloc->node);

	drm_mm_remove_node(&alloc->mm_node);
	kfree(alloc);
}

void vc4_hvs_mark_dlist_entry_stale(struct vc4_hvs *hvs,
				    struct vc4_hvs_dlist_allocation *alloc)
{
	unsigned long flags;
	u8 frcnt;

	if (!alloc)
		return;

	if (!drm_mm_node_allocated(&alloc->mm_node))
		return;

	/*
	 * Kunit tests run with a mock device and we consider any hardware
	 * access a test failure. Let's free the dlist allocation right away
	 * if we're running under kunit; we won't risk a dlist corruption
	 * anyway.
	 */
	if (kunit_get_current_test()) {
		spin_lock_irqsave(&hvs->mm_lock, flags);
		vc4_hvs_free_dlist_entry_locked(hvs, alloc);
		spin_unlock_irqrestore(&hvs->mm_lock, flags);
		return;
	}

	frcnt = vc4_hvs_get_fifo_frame_count(hvs, alloc->channel);
	alloc->target_frame_count = (frcnt + 1) & ((1 << 6) - 1);
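
	/*
	 * Added note: the hardware frame counter is only 6 bits wide, so
	 * the "& ((1 << 6) - 1)" wraps the target count; e.g. a current
	 * count of 63 gives a target of 0.
	 */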

	spin_lock_irqsave(&hvs->mm_lock, flags);

	list_add_tail(&alloc->node, &hvs->stale_dlist_entries);

	/* Clear any stale EOF status before re-enabling the interrupt. */
	HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_EOF(alloc->channel));
	vc4_hvs_irq_enable_eof(hvs, alloc->channel);

	spin_unlock_irqrestore(&hvs->mm_lock, flags);
}

static void vc4_hvs_schedule_dlist_sweep(struct vc4_hvs *hvs,
					 unsigned int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&hvs->mm_lock, flags);

	if (!list_empty(&hvs->stale_dlist_entries))
		queue_work(system_unbound_wq, &hvs->free_dlist_work);

	vc4_hvs_irq_clear_eof(hvs, channel);

	spin_unlock_irqrestore(&hvs->mm_lock, flags);
}

/*
 * Frame counts are essentially sequence numbers over 6 bits, and we
 * thus can use sequence number arithmetic as described in RFC1982 to
 * implement proper comparison between them.
 */
static bool vc4_hvs_frcnt_lte(u8 cnt1, u8 cnt2)
{
	return (s8)((cnt1 << 2) - (cnt2 << 2)) <= 0;
}
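
/*
 * Worked example (added comment): with cnt1 = 62 and cnt2 = 1 (the counter
 * having wrapped in between), (62 << 2) - (1 << 2) = 244, i.e. -12 as an
 * s8, so 62 still compares as "before" 1. The shift by 2 moves the 6-bit
 * counts to the top of the byte so the s8 subtraction wraps at the same
 * point the hardware counter does.
 */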

/*
 * Some atomic commits (legacy cursor updates, mostly) will not wait for
 * the next vblank and will just return once the commit has been pushed
 * to the hardware.
 *
 * On the hardware side, our HVS stores the planes parameters in its
 * context RAM, and will use part of the RAM to store data during the
 * composition process.
 *
 * This interacts badly if we get multiple commits before the next
 * vblank since we could end up overwriting the DLIST entries used by
 * previous commits if our dlist allocation reuses that entry. In such a
 * case, we would overwrite the data currently being used by the
 * hardware, resulting in a corrupted frame.
 *
 * In order to work around this, we'll queue the dlist entries in a list
 * once the associated CRTC state is destroyed. The HVS only lets us
 * know which entry is currently active, not which ones are no longer
 * in use, so in order to avoid freeing entries that are still used by
 * the hardware we add a guesstimate of the frame count where our entry
 * will no longer be used, and will only free those entries once that
 * frame count has been reached.
 */
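
/*
 * Lifecycle summary (added for clarity): an allocation is made in
 * vc4_hvs_atomic_check(), written to the hardware in
 * vc4_hvs_atomic_flush(), queued on stale_dlist_entries via
 * vc4_hvs_mark_dlist_entry_stale() once the old CRTC state is destroyed,
 * and finally freed below once the channel's frame count passes
 * target_frame_count.
 */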

static void vc4_hvs_dlist_free_work(struct work_struct *work)
{
	struct vc4_hvs *hvs = container_of(work, struct vc4_hvs, free_dlist_work);
	struct vc4_hvs_dlist_allocation *cur, *next;
	unsigned long flags;

	spin_lock_irqsave(&hvs->mm_lock, flags);
	list_for_each_entry_safe(cur, next, &hvs->stale_dlist_entries, node) {
		u8 frcnt;

		frcnt = vc4_hvs_get_fifo_frame_count(hvs, cur->channel);
		if (!vc4_hvs_frcnt_lte(cur->target_frame_count, frcnt))
			continue;

		vc4_hvs_free_dlist_entry_locked(hvs, cur);
	}
	spin_unlock_irqrestore(&hvs->mm_lock, flags);
}

u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
	struct drm_device *drm = &hvs->vc4->base;
	u8 field = 0;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return 0;

	switch (fifo) {
	case 0:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT0);
		break;
	case 1:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT1);
		break;
	case 2:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
				      SCALER_DISPSTAT2_FRCNT2);
		break;
	}

	drm_dev_exit(idx);
	return field;
}

int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 reg;
	int ret;

	if (!vc4->is_vc5)
		return output;

	/*
	 * NOTE: We should probably use drm_dev_enter()/drm_dev_exit()
	 * here, but this function is only used during the DRM device
	 * initialization, so we should be fine.
	 */

	switch (output) {
	case 0:
		return 0;

	case 1:
		return 1;

	case 2:
		reg = HVS_READ(SCALER_DISPECTRL);
		ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg);
		if (ret == 0)
			return 2;

		return 0;

	case 3:
		reg = HVS_READ(SCALER_DISPCTRL);
		ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	case 4:
		reg = HVS_READ(SCALER_DISPEOLN);
		ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	case 5:
		reg = HVS_READ(SCALER_DISPDITHER);
		ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	default:
		return -EPIPE;
	}
}

static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
				struct drm_display_mode *mode, bool oneshot)
{
	struct vc4_dev *vc4 = hvs->vc4;
	struct drm_device *drm = &vc4->base;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
	unsigned int chan = vc4_crtc_state->assigned_channel;
	bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
	u32 dispbkgndx;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return -ENODEV;

	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
	HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);

	/* Turn on the scaler, which will wait for vstart to start
	 * compositing.
	 * When feeding the transposer, we should operate in oneshot
	 * mode.
	 */
	dispctrl = SCALER_DISPCTRLX_ENABLE;
	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));

	if (!vc4->is_vc5) {
		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
					  SCALER_DISPCTRLX_WIDTH) |
			    VC4_SET_FIELD(mode->vdisplay,
					  SCALER_DISPCTRLX_HEIGHT) |
			    (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
		dispbkgndx |= SCALER_DISPBKGND_AUTOHS;
	} else {
		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
					  SCALER5_DISPCTRLX_WIDTH) |
			    VC4_SET_FIELD(mode->vdisplay,
					  SCALER5_DISPCTRLX_HEIGHT) |
			    (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);
		dispbkgndx &= ~SCALER5_DISPBKGND_BCK2BCK;
	}

	HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);

	dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
	dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;

	if (crtc->state->gamma_lut)
		/* Enable gamma if required */
		dispbkgndx |= SCALER_DISPBKGND_GAMMA;

	HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));

	/* Reload the LUT, since the SRAMs would have been disabled if
	 * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
	 */
	if (!vc4->is_vc5)
		vc4_hvs_lut_load(hvs, vc4_crtc);
	else
		vc5_hvs_lut_load(hvs, vc4_crtc);

	drm_dev_exit(idx);

	return 0;
}

void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
{
	struct drm_device *drm = &hvs->vc4->base;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE))
		goto out;

	HVS_WRITE(SCALER_DISPCTRLX(chan),
		  HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan),
		  HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE);

	/* Once we leave, the scaler should be disabled and its fifo empty. */
	WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);

	WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
				   SCALER_DISPSTATX_MODE) !=
		     SCALER_DISPSTATX_MODE_DISABLED);

	WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
		      (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
		     SCALER_DISPSTATX_EMPTY);

out:
	drm_dev_exit(idx);
}

static int vc4_hvs_gamma_check(struct drm_crtc *crtc,
			       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->color_mgmt_hvs5)
		return 0;

	if (!crtc_state->color_mgmt_changed)
		return 0;

	if (crtc_state->gamma_lut) {
		unsigned int len = drm_color_lut_size(crtc_state->gamma_lut);

		if (len != crtc->gamma_size) {
			DRM_DEBUG_KMS("Invalid LUT size; got %u, expected %u\n",
				      len, crtc->gamma_size);
			return -EINVAL;
		}
	}

	connector = vc4_get_crtc_connector(crtc, crtc_state);
	if (!connector)
		return -EINVAL;

	if (connector->connector_type != DRM_MODE_CONNECTOR_HDMIA)
		return 0;

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (!conn_state)
		return -EINVAL;

	crtc_state->mode_changed = true;
	return 0;
}

int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
	struct vc4_hvs_dlist_allocation *alloc;
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_plane *plane;
	const struct drm_plane_state *plane_state;
	u32 dlist_count = 0;

	/* The pixelvalve can only feed one encoder (and encoders are
	 * 1:1 with connectors.)
	 */
	if (hweight32(crtc_state->connector_mask) > 1)
		return -EINVAL;

	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		u32 plane_dlist_count = vc4_plane_dlist_size(plane_state);

		drm_dbg_driver(dev, "[CRTC:%d:%s] Found [PLANE:%d:%s] with DLIST size: %u\n",
			       crtc->base.id, crtc->name,
			       plane->base.id, plane->name,
			       plane_dlist_count);

		dlist_count += plane_dlist_count;
	}

	dlist_count++; /* Account for SCALER_CTL0_END. */

	drm_dbg_driver(dev, "[CRTC:%d:%s] Allocating DLIST block with size: %u\n",
		       crtc->base.id, crtc->name, dlist_count);

	alloc = vc4_hvs_alloc_dlist_entry(vc4->hvs, vc4_state->assigned_channel, dlist_count);
	if (IS_ERR(alloc))
		return PTR_ERR(alloc);

	vc4_state->mm = alloc;

	return vc4_hvs_gamma_check(crtc, state);
}

static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return;

	WARN_ON(!vc4_state->mm);
	HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
		  vc4_state->mm->mm_node.start);

	drm_dev_exit(idx);
}

static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	if (crtc->state->event) {
		crtc->state->event->pipe = drm_crtc_index(crtc);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);

		if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) {
			vc4_crtc->event = crtc->state->event;
			crtc->state->event = NULL;
		}

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	WARN_ON(!vc4_state->mm);

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_dlist = vc4_state->mm->mm_node.start;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

void vc4_hvs_atomic_begin(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_hvs_channel = vc4_state->assigned_channel;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	bool oneshot = vc4_crtc->feeds_txp;

	vc4_hvs_install_dlist(crtc);
	vc4_hvs_update_dlist(crtc);
	vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
}

void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
	unsigned int chan = vc4_state->assigned_channel;

	vc4_hvs_stop_channel(vc4->hvs, chan);
}

void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									 crtc);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned int channel = vc4_state->assigned_channel;
	struct drm_plane *plane;
	struct vc4_plane_state *vc4_plane_state;
	bool debug_dump_regs = false;
	bool enable_bg_fill = false;
	u32 __iomem *dlist_start, *dlist_next;
	unsigned int zpos = 0;
	bool found = false;
	int idx;

	if (!drm_dev_enter(dev, &idx)) {
		vc4_crtc_send_vblank(crtc);
		return;
	}

	if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
		return;

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

	dlist_start = vc4->hvs->dlist + vc4_state->mm->mm_node.start;
	dlist_next = dlist_start;

	/* Copy all the active planes' dlist contents to the hardware dlist. */
	do {
		found = false;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			if (plane->state->normalized_zpos != zpos)
				continue;

			/* Is this the first active plane? */
			if (dlist_next == dlist_start) {
				/* We need to enable background fill when a plane
				 * could be alpha blending from the background, i.e.
				 * where no other plane is underneath. It suffices to
				 * consider the first active plane here since we set
				 * needs_bg_fill such that either the first plane
				 * already needs it or all planes on top blend from
				 * the first or a lower plane.
				 */
				vc4_plane_state = to_vc4_plane_state(plane->state);
				enable_bg_fill = vc4_plane_state->needs_bg_fill;
			}

			dlist_next += vc4_plane_write_dlist(plane, dlist_next);

			found = true;
		}

		zpos++;
	} while (found);

	writel(SCALER_CTL0_END, dlist_next);
	dlist_next++;

	WARN_ON(!vc4_state->mm);
	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm->mm_node.size);

	if (enable_bg_fill)
		/* This sets a black background color fill, as is the case
		 * with other DRM drivers.
		 */
		HVS_WRITE(SCALER_DISPBKGNDX(channel),
			  HVS_READ(SCALER_DISPBKGNDX(channel)) |
			  SCALER_DISPBKGND_FILL);

	/* Only update DISPLIST if the CRTC was already running and is not
	 * being disabled.
	 * vc4_crtc_enable() takes care of updating the dlist just after
	 * re-enabling VBLANK interrupts and before enabling the engine.
	 * If the CRTC is being disabled, there's no point in updating this
	 * information.
	 */
	if (crtc->state->active && old_state->active) {
		vc4_hvs_install_dlist(crtc);
		vc4_hvs_update_dlist(crtc);
	}

	if (crtc->state->color_mgmt_changed) {
		u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));

		if (crtc->state->gamma_lut) {
			if (!vc4->is_vc5) {
				vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
				dispbkgndx |= SCALER_DISPBKGND_GAMMA;
			} else {
				vc5_hvs_update_gamma_lut(hvs, vc4_crtc);
			}
		} else {
			/* Unsetting DISPBKGND_GAMMA skips the gamma lut step
			 * in hardware, which is the same as a linear lut that
			 * DRM expects us to use in absence of a user lut.
			 *
			 * Do NOT change this dynamically for hvs5, as it
			 * inserts a delay in the pipeline that will cause
			 * stalls if enabled/disabled whilst running. The
			 * other code paths should already be
			 * disabling/enabling the pipeline when gamma changes.
			 */
			if (!vc4->is_vc5)
				dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
		}
		HVS_WRITE(SCALER_DISPBKGNDX(channel), dispbkgndx);
	}

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

	drm_dev_exit(idx);
}

void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
	struct drm_device *drm = &hvs->vc4->base;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
					 SCALER_DISPCTRL_DSPEISLUR(channel));

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	drm_dev_exit(idx);
}

void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
	struct drm_device *drm = &hvs->vc4->base;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
					SCALER_DISPCTRL_DSPEISLUR(channel));

	/* Clear any stale underrun status before re-enabling the interrupt. */
	HVS_WRITE(SCALER_DISPSTAT,
		  SCALER_DISPSTAT_EUFLOW(channel));
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	drm_dev_exit(idx);
}

static void vc4_hvs_report_underrun(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	atomic_inc(&vc4->underrun);
	DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
}
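
/*
 * Added note: the irq handler below masks the underrun interrupt before
 * reporting it so that a persistent underrun cannot turn into an interrupt
 * storm; the interrupt is expected to be re-armed later through
 * vc4_hvs_unmask_underrun(), which is called from outside this file.
 */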

static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
{
	struct drm_device *dev = data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	irqreturn_t irqret = IRQ_NONE;
	int channel;
	u32 control;
	u32 status;
	u32 dspeislur;

	/*
	 * NOTE: We don't need to protect the register access using
	 * drm_dev_enter() here because the interrupt handler lifetime
	 * is tied to the device itself, and not to the DRM device.
	 *
	 * So when the device goes away, one of the first things we do
	 * is unregister the interrupt handler, and only then unregister
	 * the DRM device. drm_dev_enter() would thus always succeed if
	 * we are here.
	 */

	status = HVS_READ(SCALER_DISPSTAT);
	control = HVS_READ(SCALER_DISPCTRL);

	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
		dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
					  SCALER_DISPCTRL_DSPEISLUR(channel);

		/* Interrupt masking is not always honored, so check it here. */
		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
		    control & dspeislur) {
			vc4_hvs_mask_underrun(hvs, channel);
			vc4_hvs_report_underrun(dev);

			irqret = IRQ_HANDLED;
		}

		if (status & SCALER_DISPSTAT_EOF(channel)) {
			vc4_hvs_schedule_dlist_sweep(hvs, channel);
			irqret = IRQ_HANDLED;
		}
	}

	/* Clear every per-channel interrupt flag. */
	HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
				   SCALER_DISPSTAT_IRQMASK(1) |
				   SCALER_DISPSTAT_IRQMASK(2));

	return irqret;
}

int vc4_hvs_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *drm = minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = vc4->hvs;

	if (vc4->firmware_kms)
		return 0;

	if (!vc4->is_vc5)
		debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    &vc4->load_tracker_enabled);

	if (vc4->is_vc5)
		drm_debugfs_add_file(drm, "hvs_gamma", vc5_hvs_debugfs_gamma,
				     NULL);

	drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);

	drm_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, NULL);

	vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);

	return 0;
}

struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev)
{
	struct drm_device *drm = &vc4->base;
	struct vc4_hvs *hvs;

	hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
	if (!hvs)
		return ERR_PTR(-ENOMEM);

	hvs->vc4 = vc4;
	hvs->pdev = pdev;

	spin_lock_init(&hvs->mm_lock);

	INIT_LIST_HEAD(&hvs->stale_dlist_entries);
	INIT_WORK(&hvs->free_dlist_work, vc4_hvs_dlist_free_work);

	/* Set up the HVS display list memory manager. We never
	 * overwrite the setup from the bootloader (just 128b out of
	 * our 16K), since we don't want to scramble the screen when
	 * transitioning from the firmware's boot setup to runtime.
	 */
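	/* Added note: HVS_BOOTLOADER_DLIST_END and the range below are in
	 * units of 32-bit dlist words, hence the SCALER_DLIST_SIZE >> 2
	 * (bytes to words) conversion.
	 */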
	drm_mm_init(&hvs->dlist_mm,
		    HVS_BOOTLOADER_DLIST_END,
		    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);

	/* Set up the HVS LBM memory manager. We could have some more
	 * complicated data structure that allowed reuse of LBM areas
	 * between planes when they don't overlap on the screen, but
	 * for now we just allocate globally.
	 */
	if (!vc4->is_vc5)
		/* 48k words of 2x12-bit pixels */
		drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
	else
		/* 60k words of 4x12-bit pixels */
		drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);

	vc4->hvs = hvs;

	return hvs;
}

static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = NULL;
	int ret;
	u32 dispctrl;
	u32 reg, top;

	hvs = __vc4_hvs_alloc(vc4, NULL);
	if (IS_ERR(hvs))
		return PTR_ERR(hvs);

	hvs->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(hvs->regs))
		return PTR_ERR(hvs->regs);

	hvs->regset.base = hvs->regs;
	hvs->regset.regs = hvs_regs;
	hvs->regset.nregs = ARRAY_SIZE(hvs_regs);

	if (vc4->is_vc5) {
		struct rpi_firmware *firmware;
		struct device_node *node;
		unsigned int max_rate;

		node = rpi_firmware_find_node();
		if (!node)
			return -EINVAL;

		firmware = rpi_firmware_get(node);
		of_node_put(node);
		if (!firmware)
			return -EPROBE_DEFER;

		hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(hvs->core_clk)) {
			dev_err(&pdev->dev, "Couldn't get core clock\n");
			return PTR_ERR(hvs->core_clk);
		}

		max_rate = rpi_firmware_clk_get_max_rate(firmware,
							 RPI_FIRMWARE_CORE_CLK_ID);
		rpi_firmware_put(firmware);
		if (max_rate >= 550000000)
			hvs->vc5_hdmi_enable_hdmi_20 = true;

		if (max_rate >= 600000000)
			hvs->vc5_hdmi_enable_4096by2160 = true;

		hvs->max_core_rate = max_rate;

		ret = clk_prepare_enable(hvs->core_clk);
		if (ret) {
			dev_err(&pdev->dev, "Couldn't enable the core clock\n");
			return ret;
		}
	}

	if (!vc4->is_vc5)
		hvs->dlist = hvs->regs + SCALER_DLIST_START;
	else
		hvs->dlist = hvs->regs + SCALER5_DLIST_START;

	/* Upload filter kernels. We only have the one for now, so we
	 * keep it around for the lifetime of the driver.
	 */
	ret = vc4_hvs_upload_linear_kernel(hvs,
					   &hvs->mitchell_netravali_filter,
					   mitchell_netravali_1_3_1_3_kernel);
	if (ret)
		return ret;

	reg = HVS_READ(SCALER_DISPECTRL);
	reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
	HVS_WRITE(SCALER_DISPECTRL,
		  reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX));

	reg = HVS_READ(SCALER_DISPCTRL);
	reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
	HVS_WRITE(SCALER_DISPCTRL,
		  reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX));

	reg = HVS_READ(SCALER_DISPEOLN);
	reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK;
	HVS_WRITE(SCALER_DISPEOLN,
		  reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX));

	reg = HVS_READ(SCALER_DISPDITHER);
	reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK;
	HVS_WRITE(SCALER_DISPDITHER,
		  reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));

	dispctrl = HVS_READ(SCALER_DISPCTRL);

	dispctrl |= SCALER_DISPCTRL_ENABLE;
	dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
		    SCALER_DISPCTRL_DISPEIRQ(1) |
		    SCALER_DISPCTRL_DISPEIRQ(2);

	if (!vc4->is_vc5)
		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
			      SCALER_DISPCTRL_SLVWREIRQ |
			      SCALER_DISPCTRL_SLVRDEIRQ |
			      SCALER_DISPCTRL_DSPEIEOF(0) |
			      SCALER_DISPCTRL_DSPEIEOF(1) |
			      SCALER_DISPCTRL_DSPEIEOF(2) |
			      SCALER_DISPCTRL_DSPEIEOLN(0) |
			      SCALER_DISPCTRL_DSPEIEOLN(1) |
			      SCALER_DISPCTRL_DSPEIEOLN(2) |
			      SCALER_DISPCTRL_DSPEISLUR(0) |
			      SCALER_DISPCTRL_DSPEISLUR(1) |
			      SCALER_DISPCTRL_DSPEISLUR(2) |
			      SCALER_DISPCTRL_SCLEIRQ);
	else
		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
			      SCALER5_DISPCTRL_SLVEIRQ |
			      SCALER5_DISPCTRL_DSPEIEOF(0) |
			      SCALER5_DISPCTRL_DSPEIEOF(1) |
			      SCALER5_DISPCTRL_DSPEIEOF(2) |
			      SCALER5_DISPCTRL_DSPEIEOLN(0) |
			      SCALER5_DISPCTRL_DSPEIEOLN(1) |
			      SCALER5_DISPCTRL_DSPEIEOLN(2) |
			      SCALER5_DISPCTRL_DSPEISLUR(0) |
			      SCALER5_DISPCTRL_DSPEISLUR(1) |
			      SCALER5_DISPCTRL_DSPEISLUR(2) |
			      SCALER_DISPCTRL_SCLEIRQ);

	if (!vc4->is_vc5) {
		/* Set AXI panic mode.
		 * VC4 panics when < 2 lines in FIFO.
		 * VC5 panics when less than 1 line in the FIFO.
		 */
		dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
			      SCALER_DISPCTRL_PANIC1_MASK |
			      SCALER_DISPCTRL_PANIC2_MASK);
		dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
		dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
		dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
	} else {
		/* Set AXI panic mode.
		 * VC4 panics when < 2 lines in FIFO.
		 * VC5 panics when less than 1 line in the FIFO.
		 */
		dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
			      SCALER_DISPCTRL_PANIC1_MASK |
			      SCALER_DISPCTRL_PANIC2_MASK);
		dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
		dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
		dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
	}
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	/* Recompute Composite Output Buffer (COB) allocations for the
	 * displays
	 */
	if (!vc4->is_vc5) {
		/* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
		 * The bottom 2048 pixels are full 32bpp RGBA (intended for the
		 * TXP composing RGBA to memory), whilst the remainder are only
		 * 24bpp RGB.
		 *
		 * Assign 3 lines to channels 1 & 2, and just over 4 lines to
		 * channel 0.
		 */
#define VC4_COB_SIZE		20736
#define VC4_COB_LINE_WIDTH	2048
#define VC4_COB_NUM_LINES	3
		reg = 0;
		top = VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE2, reg);
		reg = top;
		top += VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE1, reg);
		reg = top;
		top = VC4_COB_SIZE;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE0, reg);
	} else {
		/* The COB is 44416 pixels, or 10.8 lines at 4096 wide.
		 * The bottom 4096 pixels are full RGBA (intended for the TXP
		 * composing RGBA to memory), whilst the remainder are only
		 * RGB. Addressing is always pixel wide.
		 *
		 * Assign 3 lines of 4096 to channels 1 & 2, and just over 4
		 * lines to channel 0.
		 */
#define VC5_COB_SIZE		44416
#define VC5_COB_LINE_WIDTH	4096
#define VC5_COB_NUM_LINES	3
		reg = 0;
		top = VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
		reg |= top << 16;
		HVS_WRITE(SCALER_DISPBASE2, reg);
		top += 16;
		reg = top;
		top += VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
		reg |= top << 16;
		HVS_WRITE(SCALER_DISPBASE1, reg);
		top += 16;
		reg = top;
		top = VC5_COB_SIZE;
		reg |= top << 16;
		HVS_WRITE(SCALER_DISPBASE0, reg);
	}

	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
			       vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
	if (ret)
		return ret;

	return 0;
}

static void vc4_hvs_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_mm_node *node, *next;

	if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
		drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);

	drm_mm_for_each_node_safe(node, next, &vc4->hvs->dlist_mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&vc4->hvs->dlist_mm);

	drm_mm_for_each_node_safe(node, next, &vc4->hvs->lbm_mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&vc4->hvs->lbm_mm);

	clk_disable_unprepare(hvs->core_clk);

	vc4->hvs = NULL;
}

static const struct component_ops vc4_hvs_ops = {
	.bind   = vc4_hvs_bind,
	.unbind = vc4_hvs_unbind,
};

static int vc4_hvs_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_hvs_ops);
}

static void vc4_hvs_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_hvs_ops);
}

static const struct of_device_id vc4_hvs_dt_match[] = {
	{ .compatible = "brcm,bcm2711-hvs" },
	{ .compatible = "brcm,bcm2835-hvs" },
	{}
};

struct platform_driver vc4_hvs_driver = {
	.probe = vc4_hvs_dev_probe,
	.remove_new = vc4_hvs_dev_remove,
	.driver = {
		.name = "vc4_hvs",
		.of_match_table = vc4_hvs_dt_match,
	},
};